Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h           |    31
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc              |   525
-rw-r--r--  deps/v8/src/x64/assembler-x64.h               |   142
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc               |   187
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc             |  3128
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h              |   328
-rw-r--r--  deps/v8/src/x64/codegen-x64-inl.h             |    46
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                |  8695
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                 |   674
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc                    |    10
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                  |    15
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc            |    75
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                 |   148
-rw-r--r--  deps/v8/src/x64/frames-x64.h                  |     2
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc           |  1263
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                     |   604
-rw-r--r--  deps/v8/src/x64/jump-target-x64.cc            |   437
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc        |  1611
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h         |    63
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc   |     8
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                |   576
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                 |   541
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc        |  1575
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h         |  1035
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc |    88
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h  |    12
-rw-r--r--  deps/v8/src/x64/register-allocator-x64-inl.h  |    87
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.cc     |    91
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.h      |    43
-rw-r--r--  deps/v8/src/x64/simulator-x64.h               |    11
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc             |  1137
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc          |  1292
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h           |   593
33 files changed, 7680 insertions(+), 17393 deletions(-)
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index b082624f44..8db54f0752 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -30,7 +30,7 @@
#include "cpu.h"
#include "debug.h"
-#include "memory.h"
+#include "v8memory.h"
namespace v8 {
namespace internal {
@@ -61,9 +61,15 @@ void Assembler::emitw(uint16_t x) {
}
-void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::emit_code_target(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- RecordRelocInfo(rmode);
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+ } else {
+ RecordRelocInfo(rmode);
+ }
int current = code_targets_.length();
if (current > 0 && code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target.
@@ -372,11 +378,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
@@ -386,25 +393,25 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
template<typename StaticVisitor>
-void RelocInfo::Visit() {
+void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(target_object_address());
+ StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(this);
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (Debug::has_break_points() &&
+ } else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 35c05b3ac3..745fdaeb8f 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -38,21 +38,38 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
-// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
-// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
-uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
-uint64_t CpuFeatures::enabled_ = 0;
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-void CpuFeatures::Probe(bool portable) {
- ASSERT(Heap::HasBeenSetup());
+
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
supported_ = kDefaultCpuFeatures;
- if (portable && Serializer::enabled()) {
+ if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
- Assembler assm(NULL, 0);
+ const int kBufferSize = 4 * KB;
+ VirtualMemory* memory = new VirtualMemory(kBufferSize);
+ if (!memory->IsReserved()) {
+ delete memory;
+ return;
+ }
+ ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+ if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+ delete memory;
+ return;
+ }
+
+ Assembler assm(NULL, memory->address(), kBufferSize);
Label cpuid, done;
#define __ assm.
// Save old rsp, since we are going to modify the stack.
@@ -82,7 +99,7 @@ void CpuFeatures::Probe(bool portable) {
// ecx:edx. Temporarily enable CPUID support because we know it's
// safe here.
__ bind(&cpuid);
- __ movq(rax, Immediate(1));
+ __ movl(rax, Immediate(1));
supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
__ cpuid();
@@ -116,28 +133,20 @@ void CpuFeatures::Probe(bool portable) {
__ ret(0);
#undef __
- CodeDesc desc;
- assm.GetCode(&desc);
- MaybeObject* maybe_code = Heap::CreateCode(desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>());
- Object* code;
- if (!maybe_code->ToObject(&code)) return;
- if (!code->IsCode()) return;
- PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
- found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+ found_by_runtime_probing_ &= ~os_guarantees;
// SSE2 and CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
ASSERT(IsSupported(SSE2));
ASSERT(IsSupported(CMOV));
+
+ delete memory;
}
@@ -191,12 +200,12 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Register constants.
const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+ // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
+ 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+ 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
};
@@ -335,18 +344,19 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
-byte* Assembler::spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size)
- : code_targets_(100), positions_recorder_(this) {
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ code_targets_(100),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -378,7 +388,6 @@ Assembler::Assembler(void* buffer, int buffer_size)
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- last_pc_ = NULL;
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
@@ -388,8 +397,10 @@ Assembler::Assembler(void* buffer, int buffer_size)
Assembler::~Assembler() {
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -409,8 +420,6 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
-
- Counters::reloc_info_size.Increment(desc->reloc_size);
}
@@ -434,7 +443,6 @@ void Assembler::CodeTargetAlign() {
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
- last_pc_ = NULL;
ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
if (L->is_linked()) {
int current = L->pos();
@@ -450,6 +458,20 @@ void Assembler::bind_to(Label* L, int pos) {
int last_imm32 = pos - (current + sizeof(int32_t));
long_at_put(current, last_imm32);
}
+ while (L->is_near_linked()) {
+ int fixup_pos = L->near_link_pos();
+ int offset_to_next =
+ static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+ ASSERT(offset_to_next <= 0);
+ int disp = pos - (fixup_pos + sizeof(int8_t));
+ ASSERT(is_int8(disp));
+ set_byte_at(fixup_pos, disp);
+ if (offset_to_next < 0) {
+ L->link_to(fixup_pos + offset_to_next, Label::kNear);
+ } else {
+ L->UnuseNear();
+ }
+ }
L->bind_to(pos);
}
@@ -459,20 +481,6 @@ void Assembler::bind(Label* L) {
}
-void Assembler::bind(NearLabel* L) {
- ASSERT(!L->is_bound());
- last_pc_ = NULL;
- while (L->unresolved_branches_ > 0) {
- int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
- int disp = pc_offset() - branch_pos;
- ASSERT(is_int8(disp));
- set_byte_at(branch_pos - sizeof(int8_t), disp);
- L->unresolved_branches_--;
- }
- L->bind_to(pc_offset());
-}
-
-
void Assembler::GrowBuffer() {
ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -487,7 +495,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::MaxOldGenerationSize())) {
+ (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -512,17 +520,16 @@ void Assembler::GrowBuffer() {
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
pc_ += pc_delta;
- if (last_pc_ != NULL) {
- last_pc_ += pc_delta;
- }
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
@@ -560,7 +567,6 @@ void Assembler::emit_operand(int code, const Operand& adr) {
void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(reg, op);
emit(opcode);
emit_operand(reg, op);
@@ -569,7 +575,6 @@ void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
@@ -586,7 +591,6 @@ void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
@@ -607,7 +611,6 @@ void Assembler::arithmetic_op_16(byte opcode,
Register reg,
const Operand& rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(reg, rm_reg);
emit(opcode);
@@ -617,7 +620,6 @@ void Assembler::arithmetic_op_16(byte opcode,
void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT((opcode & 0xC6) == 2);
if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
@@ -636,7 +638,6 @@ void Assembler::arithmetic_op_32(byte opcode,
Register reg,
const Operand& rm_reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(reg, rm_reg);
emit(opcode);
emit_operand(reg, rm_reg);
@@ -647,7 +648,6 @@ void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -667,7 +667,6 @@ void Assembler::immediate_arithmetic_op(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -685,7 +684,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66); // Operand size override prefix.
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
@@ -707,7 +705,6 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66); // Operand size override prefix.
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
@@ -726,7 +723,6 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -747,7 +743,6 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
emit(0x83);
@@ -765,7 +760,6 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
ASSERT(is_int8(src.value_) || is_uint8(src.value_));
emit(0x80);
@@ -778,7 +772,6 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (dst.code() > 3) {
// Use 64-bit mode byte registers.
emit_rex_64(dst);
@@ -792,7 +785,6 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
if (shift_amount.value_ == 1) {
emit_rex_64(dst);
@@ -809,7 +801,6 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
void Assembler::shift(Register dst, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xD3);
emit_modrm(subcode, dst);
@@ -818,7 +809,6 @@ void Assembler::shift(Register dst, int subcode) {
void Assembler::shift_32(Register dst, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xD3);
emit_modrm(subcode, dst);
@@ -827,7 +817,6 @@ void Assembler::shift_32(Register dst, int subcode) {
void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
if (shift_amount.value_ == 1) {
emit_optional_rex_32(dst);
@@ -844,7 +833,6 @@ void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xA3);
@@ -854,7 +842,6 @@ void Assembler::bt(const Operand& dst, Register src) {
void Assembler::bts(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xAB);
@@ -865,7 +852,6 @@ void Assembler::bts(const Operand& dst, Register src) {
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1000 #32-bit disp.
emit(0xE8);
if (L->is_bound()) {
@@ -884,20 +870,20 @@ void Assembler::call(Label* L) {
}
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1000 #32-bit disp.
emit(0xE8);
- emit_code_target(target, rmode);
+ emit_code_target(target, rmode, ast_id);
}
void Assembler::call(Register adr) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: FF /2 r64.
emit_optional_rex_32(adr);
emit(0xFF);
@@ -908,7 +894,6 @@ void Assembler::call(Register adr) {
void Assembler::call(const Operand& op) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: FF /2 m64.
emit_optional_rex_32(op);
emit(0xFF);
@@ -923,7 +908,6 @@ void Assembler::call(const Operand& op) {
void Assembler::call(Address target) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1000 #32-bit disp.
emit(0xE8);
Address source = pc_ + 4;
@@ -935,13 +919,16 @@ void Assembler::call(Address target) {
void Assembler::clc() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF8);
}
+void Assembler::cld() {
+ EnsureSpace ensure_space(this);
+ emit(0xFC);
+}
+
void Assembler::cdq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x99);
}
@@ -956,7 +943,6 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
// 64-bit architecture.
ASSERT(cc >= 0); // Use mov for unconditional moves.
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
emit(0x0f);
@@ -973,7 +959,6 @@ void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
}
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: REX.W 0f 40 + cc /r.
emit_rex_64(dst, src);
emit(0x0f);
@@ -990,7 +975,6 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
}
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -1007,7 +991,6 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
}
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode: 0f 40 + cc /r.
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -1019,7 +1002,6 @@ void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
void Assembler::cmpb_al(Immediate imm8) {
ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x3c);
emit(imm8.value_);
}
@@ -1028,7 +1010,6 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x0F);
emit(0xA2);
}
@@ -1036,7 +1017,6 @@ void Assembler::cpuid() {
void Assembler::cqo() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64();
emit(0x99);
}
@@ -1044,7 +1024,6 @@ void Assembler::cqo() {
void Assembler::decq(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_modrm(0x1, dst);
@@ -1053,7 +1032,6 @@ void Assembler::decq(Register dst) {
void Assembler::decq(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_operand(1, dst);
@@ -1062,7 +1040,6 @@ void Assembler::decq(const Operand& dst) {
void Assembler::decl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_modrm(0x1, dst);
@@ -1071,7 +1048,6 @@ void Assembler::decl(Register dst) {
void Assembler::decl(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_operand(1, dst);
@@ -1080,7 +1056,6 @@ void Assembler::decl(const Operand& dst) {
void Assembler::decb(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (dst.code() > 3) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(dst);
@@ -1092,7 +1067,6 @@ void Assembler::decb(Register dst) {
void Assembler::decb(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFE);
emit_operand(1, dst);
@@ -1101,7 +1075,6 @@ void Assembler::decb(const Operand& dst) {
void Assembler::enter(Immediate size) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xC8);
emitw(size.value_); // 16 bit operand, always.
emit(0);
@@ -1110,14 +1083,12 @@ void Assembler::enter(Immediate size) {
void Assembler::hlt() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF4);
}
void Assembler::idivq(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x7, src);
@@ -1126,7 +1097,6 @@ void Assembler::idivq(Register src) {
void Assembler::idivl(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src);
emit(0xF7);
emit_modrm(0x7, src);
@@ -1135,7 +1105,6 @@ void Assembler::idivl(Register src) {
void Assembler::imul(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x5, src);
@@ -1144,7 +1113,6 @@ void Assembler::imul(Register src) {
void Assembler::imul(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1154,7 +1122,6 @@ void Assembler::imul(Register dst, Register src) {
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1164,7 +1131,6 @@ void Assembler::imul(Register dst, const Operand& src) {
void Assembler::imul(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
if (is_int8(imm.value_)) {
emit(0x6B);
@@ -1180,7 +1146,6 @@ void Assembler::imul(Register dst, Register src, Immediate imm) {
void Assembler::imull(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1190,7 +1155,6 @@ void Assembler::imull(Register dst, Register src) {
void Assembler::imull(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xAF);
@@ -1200,7 +1164,6 @@ void Assembler::imull(Register dst, const Operand& src) {
void Assembler::imull(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
if (is_int8(imm.value_)) {
emit(0x6B);
@@ -1216,7 +1179,6 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
void Assembler::incq(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_modrm(0x0, dst);
@@ -1225,7 +1187,6 @@ void Assembler::incq(Register dst) {
void Assembler::incq(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xFF);
emit_operand(0, dst);
@@ -1234,7 +1195,6 @@ void Assembler::incq(const Operand& dst) {
void Assembler::incl(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_operand(0, dst);
@@ -1243,7 +1203,6 @@ void Assembler::incl(const Operand& dst) {
void Assembler::incl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_modrm(0, dst);
@@ -1252,12 +1211,11 @@ void Assembler::incl(Register dst) {
void Assembler::int3() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xCC);
}
-void Assembler::j(Condition cc, Label* L) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
if (cc == always) {
jmp(L);
return;
@@ -1265,7 +1223,6 @@ void Assembler::j(Condition cc, Label* L) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint4(cc));
if (L->is_bound()) {
const int short_size = 2;
@@ -1282,6 +1239,17 @@ void Assembler::j(Condition cc, Label* L) {
emit(0x80 | cc);
emitl(offs - long_size);
}
+ } else if (distance == Label::kNear) {
+ // 0111 tttn #8-bit disp
+ emit(0x70 | cc);
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ emit(disp);
} else if (L->is_linked()) {
// 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
@@ -1303,7 +1271,6 @@ void Assembler::j(Condition cc,
Handle<Code> target,
RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint4(cc));
// 0000 1111 1000 tttn #32-bit disp.
emit(0x0F);
@@ -1312,30 +1279,8 @@ void Assembler::j(Condition cc,
}
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+void Assembler::jmp(Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 0111 tttn #8-bit disp
- emit(0x70 | cc);
- emit((offs - short_size) & 0xFF);
- } else {
- emit(0x70 | cc);
- emit(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
-void Assembler::jmp(Label* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
const int short_size = sizeof(int8_t);
const int long_size = sizeof(int32_t);
if (L->is_bound()) {
@@ -1350,7 +1295,17 @@ void Assembler::jmp(Label* L) {
emit(0xE9);
emitl(offs - long_size);
}
- } else if (L->is_linked()) {
+ } else if (distance == Label::kNear) {
+ emit(0xEB);
+ byte disp = 0x00;
+ if (L->is_near_linked()) {
+ int offset = L->near_link_pos() - pc_offset();
+ ASSERT(is_int8(offset));
+ disp = static_cast<byte>(offset & 0xFF);
+ }
+ L->link_to(pc_offset(), Label::kNear);
+ emit(disp);
+ } else if (L->is_linked()) {
// 1110 1001 #32-bit disp.
emit(0xE9);
emitl(L->pos());
@@ -1368,35 +1323,14 @@ void Assembler::jmp(Label* L) {
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// 1110 1001 #32-bit disp.
emit(0xE9);
emit_code_target(target, rmode);
}
-void Assembler::jmp(NearLabel* L) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- if (L->is_bound()) {
- const int short_size = 2;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- ASSERT(is_int8(offs - short_size));
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- emit(0xEB);
- emit(0x00); // The displacement will be resolved later.
- L->link_to(pc_offset());
- }
-}
-
-
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode FF/4 r64.
emit_optional_rex_32(target);
emit(0xFF);
@@ -1406,7 +1340,6 @@ void Assembler::jmp(Register target) {
void Assembler::jmp(const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
// Opcode FF/4 m64.
emit_optional_rex_32(src);
emit(0xFF);
@@ -1416,7 +1349,6 @@ void Assembler::jmp(const Operand& src) {
void Assembler::lea(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x8D);
emit_operand(dst, src);
@@ -1425,7 +1357,6 @@ void Assembler::lea(Register dst, const Operand& src) {
void Assembler::leal(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x8D);
emit_operand(dst, src);
@@ -1434,7 +1365,6 @@ void Assembler::leal(Register dst, const Operand& src) {
void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x48); // REX.W
emit(0xA1);
emitq(reinterpret_cast<uintptr_t>(value), mode);
@@ -1448,15 +1378,18 @@ void Assembler::load_rax(ExternalReference ref) {
void Assembler::leave() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xC9);
}
void Assembler::movb(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(dst, src);
+ if (dst.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst, src);
+ } else {
+ emit_optional_rex_32(dst, src);
+ }
emit(0x8A);
emit_operand(dst, src);
}
@@ -1464,18 +1397,21 @@ void Assembler::movb(Register dst, const Operand& src) {
void Assembler::movb(Register dst, Immediate imm) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(dst);
- emit(0xC6);
- emit_modrm(0x0, dst);
+ if (dst.code() > 3) {
+ emit_rex_32(dst);
+ }
+ emit(0xB0 + dst.low_bits());
emit(imm.value_);
}
void Assembler::movb(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit_rex_32(src, dst);
+ if (src.code() > 3) {
+ emit_rex_32(src, dst);
+ } else {
+ emit_optional_rex_32(src, dst);
+ }
emit(0x88);
emit_operand(src, dst);
}
@@ -1483,7 +1419,6 @@ void Assembler::movb(const Operand& dst, Register src) {
void Assembler::movw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x89);
@@ -1493,7 +1428,6 @@ void Assembler::movw(const Operand& dst, Register src) {
void Assembler::movl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x8B);
emit_operand(dst, src);
@@ -1502,7 +1436,6 @@ void Assembler::movl(Register dst, const Operand& src) {
void Assembler::movl(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_optional_rex_32(src, dst);
emit(0x89);
@@ -1517,7 +1450,6 @@ void Assembler::movl(Register dst, Register src) {
void Assembler::movl(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src, dst);
emit(0x89);
emit_operand(src, dst);
@@ -1526,27 +1458,23 @@ void Assembler::movl(const Operand& dst, Register src) {
void Assembler::movl(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+ emit(value);
}
void Assembler::movl(Register dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+ emit(0xB8 + dst.low_bits());
+ emit(value);
}
void Assembler::movq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x8B);
emit_operand(dst, src);
@@ -1555,7 +1483,6 @@ void Assembler::movq(Register dst, const Operand& src) {
void Assembler::movq(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_rex_64(src, dst);
emit(0x89);
@@ -1570,7 +1497,6 @@ void Assembler::movq(Register dst, Register src) {
void Assembler::movq(Register dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xC7);
emit_modrm(0x0, dst);
@@ -1580,7 +1506,6 @@ void Assembler::movq(Register dst, Immediate value) {
void Assembler::movq(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x89);
emit_operand(src, dst);
@@ -1592,7 +1517,6 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
// address is not GC safe. Use the handle version instead.
ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value), rmode);
@@ -1614,7 +1538,6 @@ void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
// value.
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(value, rmode);
@@ -1629,7 +1552,6 @@ void Assembler::movq(Register dst, ExternalReference ref) {
void Assembler::movq(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xC7);
emit_operand(0, dst);
@@ -1641,7 +1563,6 @@ void Assembler::movq(const Operand& dst, Immediate value) {
// (as a 32-bit offset sign extended to 64-bit).
void Assembler::movl(const Operand& dst, Label* src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0, dst);
@@ -1671,9 +1592,8 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
} else {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(value->IsHeapObject());
- ASSERT(!Heap::InNewSpace(*value));
+ ASSERT(!HEAP->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
@@ -1683,7 +1603,6 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
void Assembler::movsxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xBE);
@@ -1693,7 +1612,6 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
void Assembler::movsxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x0F);
emit(0xBF);
@@ -1703,7 +1621,6 @@ void Assembler::movsxwq(Register dst, const Operand& src) {
void Assembler::movsxlq(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x63);
emit_modrm(dst, src);
@@ -1712,7 +1629,6 @@ void Assembler::movsxlq(Register dst, Register src) {
void Assembler::movsxlq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst, src);
emit(0x63);
emit_operand(dst, src);
@@ -1721,7 +1637,6 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
void Assembler::movzxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
@@ -1731,7 +1646,6 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
void Assembler::movzxbl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
@@ -1741,7 +1655,6 @@ void Assembler::movzxbl(Register dst, const Operand& src) {
void Assembler::movzxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1751,7 +1664,6 @@ void Assembler::movzxwq(Register dst, const Operand& src) {
void Assembler::movzxwl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1761,7 +1673,6 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
void Assembler::repmovsb() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit(0xA4);
}
@@ -1769,7 +1680,6 @@ void Assembler::repmovsb() {
void Assembler::repmovsw() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66); // Operand size override.
emit(0xF3);
emit(0xA4);
@@ -1778,7 +1688,6 @@ void Assembler::repmovsw() {
void Assembler::repmovsl() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit(0xA5);
}
@@ -1786,7 +1695,6 @@ void Assembler::repmovsl() {
void Assembler::repmovsq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_rex_64();
emit(0xA5);
@@ -1795,7 +1703,6 @@ void Assembler::repmovsq() {
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x4, src);
@@ -1804,7 +1711,6 @@ void Assembler::mul(Register src) {
void Assembler::neg(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_modrm(0x3, dst);
@@ -1813,7 +1719,6 @@ void Assembler::neg(Register dst) {
void Assembler::negl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x3, dst);
@@ -1822,7 +1727,6 @@ void Assembler::negl(Register dst) {
void Assembler::neg(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_operand(3, dst);
@@ -1831,14 +1735,12 @@ void Assembler::neg(const Operand& dst) {
void Assembler::nop() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x90);
}
void Assembler::not_(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_modrm(0x2, dst);
@@ -1847,7 +1749,6 @@ void Assembler::not_(Register dst) {
void Assembler::not_(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(dst);
emit(0xF7);
emit_operand(2, dst);
@@ -1856,7 +1757,6 @@ void Assembler::not_(const Operand& dst) {
void Assembler::notl(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x2, dst);
@@ -1881,7 +1781,6 @@ void Assembler::nop(int n) {
ASSERT(1 <= n);
ASSERT(n <= 9);
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
switch (n) {
case 1:
emit(0x90);
@@ -1952,7 +1851,6 @@ void Assembler::nop(int n) {
void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
@@ -1960,7 +1858,6 @@ void Assembler::pop(Register dst) {
void Assembler::pop(const Operand& dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0x8F);
emit_operand(0, dst);
@@ -1969,14 +1866,12 @@ void Assembler::pop(const Operand& dst) {
void Assembler::popfq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9D);
}
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
@@ -1984,7 +1879,6 @@ void Assembler::push(Register src) {
void Assembler::push(const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(src);
emit(0xFF);
emit_operand(6, src);
@@ -1993,7 +1887,6 @@ void Assembler::push(const Operand& src) {
void Assembler::push(Immediate value) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (is_int8(value.value_)) {
emit(0x6A);
emit(value.value_); // Emit low byte of value.
@@ -2006,7 +1899,6 @@ void Assembler::push(Immediate value) {
void Assembler::push_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x68);
emitl(imm32);
}
@@ -2014,14 +1906,12 @@ void Assembler::push_imm32(int32_t imm32) {
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9C);
}
void Assembler::rdtsc() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x0F);
emit(0x31);
}
@@ -2029,7 +1919,6 @@ void Assembler::rdtsc() {
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint16(imm16));
if (imm16 == 0) {
emit(0xC3);
@@ -2047,7 +1936,6 @@ void Assembler::setcc(Condition cc, Register reg) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
ASSERT(is_uint4(cc));
if (reg.code() > 3) { // Use x64 byte registers, where different.
emit_rex_32(reg);
@@ -2060,7 +1948,6 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::shld(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xA5);
@@ -2070,7 +1957,6 @@ void Assembler::shld(Register dst, Register src) {
void Assembler::shrd(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(src, dst);
emit(0x0F);
emit(0xAD);
@@ -2080,7 +1966,6 @@ void Assembler::shrd(Register dst, Register src) {
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
emit_rex_64(other);
@@ -2099,7 +1984,6 @@ void Assembler::xchg(Register dst, Register src) {
void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x48); // REX.W
emit(0xA3);
emitq(reinterpret_cast<uintptr_t>(dst), mode);
@@ -2113,7 +1997,6 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_rex_32(src, dst);
emit(0x84);
@@ -2132,7 +2015,6 @@ void Assembler::testb(Register dst, Register src) {
void Assembler::testb(Register reg, Immediate mask) {
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (reg.is(rax)) {
emit(0xA8);
emit(mask.value_); // Low byte emitted.
@@ -2151,7 +2033,6 @@ void Assembler::testb(Register reg, Immediate mask) {
void Assembler::testb(const Operand& op, Immediate mask) {
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(rax, op);
emit(0xF6);
emit_operand(rax, op); // Operation code 0
@@ -2161,7 +2042,6 @@ void Assembler::testb(const Operand& op, Immediate mask) {
void Assembler::testb(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (reg.code() > 3) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(reg, op);
@@ -2175,7 +2055,6 @@ void Assembler::testb(const Operand& op, Register reg) {
void Assembler::testl(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_optional_rex_32(src, dst);
emit(0x85);
@@ -2195,7 +2074,6 @@ void Assembler::testl(Register reg, Immediate mask) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (reg.is(rax)) {
emit(0xA9);
emit(mask);
@@ -2215,7 +2093,6 @@ void Assembler::testl(const Operand& op, Immediate mask) {
return;
}
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(rax, op);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
@@ -2225,7 +2102,6 @@ void Assembler::testl(const Operand& op, Immediate mask) {
void Assembler::testq(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_rex_64(reg, op);
emit(0x85);
emit_operand(reg, op);
@@ -2234,7 +2110,6 @@ void Assembler::testq(const Operand& op, Register reg) {
void Assembler::testq(Register dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (src.low_bits() == 4) {
emit_rex_64(src, dst);
emit(0x85);
@@ -2249,7 +2124,6 @@ void Assembler::testq(Register dst, Register src) {
void Assembler::testq(Register dst, Immediate mask) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
if (dst.is(rax)) {
emit_rex_64();
emit(0xA9);
@@ -2268,14 +2142,12 @@ void Assembler::testq(Register dst, Immediate mask) {
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xD9, 0xC0, i);
}
void Assembler::fld1() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE8);
}
@@ -2283,7 +2155,6 @@ void Assembler::fld1() {
void Assembler::fldz() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xEE);
}
@@ -2291,7 +2162,6 @@ void Assembler::fldz() {
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xEB);
}
@@ -2299,7 +2169,6 @@ void Assembler::fldpi() {
void Assembler::fldln2() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xED);
}
@@ -2307,7 +2176,6 @@ void Assembler::fldln2() {
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
@@ -2316,7 +2184,6 @@ void Assembler::fld_s(const Operand& adr) {
void Assembler::fld_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
@@ -2325,7 +2192,6 @@ void Assembler::fld_d(const Operand& adr) {
void Assembler::fstp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
@@ -2334,7 +2200,6 @@ void Assembler::fstp_s(const Operand& adr) {
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(3, adr);
@@ -2344,14 +2209,12 @@ void Assembler::fstp_d(const Operand& adr) {
void Assembler::fstp(int index) {
ASSERT(is_uint3(index));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xD8, index);
}
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
@@ -2360,7 +2223,6 @@ void Assembler::fild_s(const Operand& adr) {
void Assembler::fild_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
@@ -2369,7 +2231,6 @@ void Assembler::fild_d(const Operand& adr) {
void Assembler::fistp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
@@ -2379,7 +2240,6 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(1, adr);
@@ -2389,7 +2249,6 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fisttp_d(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(1, adr);
@@ -2398,7 +2257,6 @@ void Assembler::fisttp_d(const Operand& adr) {
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
@@ -2407,7 +2265,6 @@ void Assembler::fist_s(const Operand& adr) {
void Assembler::fistp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(7, adr);
@@ -2416,7 +2273,6 @@ void Assembler::fistp_d(const Operand& adr) {
void Assembler::fabs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE1);
}
@@ -2424,7 +2280,6 @@ void Assembler::fabs() {
void Assembler::fchs() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE0);
}
@@ -2432,7 +2287,6 @@ void Assembler::fchs() {
void Assembler::fcos() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xFF);
}
@@ -2440,7 +2294,6 @@ void Assembler::fcos() {
void Assembler::fsin() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xFE);
}
@@ -2448,7 +2301,6 @@ void Assembler::fsin() {
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF1);
}
@@ -2456,21 +2308,18 @@ void Assembler::fyl2x() {
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xC0, i);
}
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xE8, i);
}
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDA);
emit_operand(4, adr);
@@ -2479,56 +2328,48 @@ void Assembler::fisub_s(const Operand& adr) {
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xC8, i);
}
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDC, 0xF8, i);
}
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xC0, i);
}
void Assembler::fsubp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xE8, i);
}
void Assembler::fsubrp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xE0, i);
}
void Assembler::fmulp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xC8, i);
}
void Assembler::fdivp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDE, 0xF8, i);
}
void Assembler::fprem() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF8);
}
@@ -2536,7 +2377,6 @@ void Assembler::fprem() {
void Assembler::fprem1() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF5);
}
@@ -2544,14 +2384,12 @@ void Assembler::fprem1() {
void Assembler::fxch(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xD9, 0xC8, i);
}
void Assembler::fincstp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xF7);
}
@@ -2559,14 +2397,12 @@ void Assembler::fincstp() {
void Assembler::ffree(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xC0, i);
}
void Assembler::ftst() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xE4);
}
@@ -2574,14 +2410,12 @@ void Assembler::ftst() {
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit_farith(0xDD, 0xE8, i);
}
void Assembler::fucompp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDA);
emit(0xE9);
}
@@ -2589,7 +2423,6 @@ void Assembler::fucompp() {
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDB);
emit(0xE8 + i);
}
@@ -2597,7 +2430,6 @@ void Assembler::fucomi(int i) {
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDF);
emit(0xE9);
}
@@ -2605,7 +2437,6 @@ void Assembler::fucomip() {
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDE);
emit(0xD9);
}
@@ -2613,7 +2444,6 @@ void Assembler::fcompp() {
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDF);
emit(0xE0);
}
@@ -2621,14 +2451,12 @@ void Assembler::fnstsw_ax() {
void Assembler::fwait() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9B);
}
void Assembler::frndint() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xD9);
emit(0xFC);
}
@@ -2636,7 +2464,6 @@ void Assembler::frndint() {
void Assembler::fnclex() {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xDB);
emit(0xE2);
}
@@ -2646,7 +2473,6 @@ void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
// in 64-bit mode. Test CpuID.
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x9E);
}
@@ -2662,7 +2488,6 @@ void Assembler::emit_farith(int b1, int b2, int i) {
void Assembler::movd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2673,7 +2498,6 @@ void Assembler::movd(XMMRegister dst, Register src) {
void Assembler::movd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x0F);
@@ -2684,7 +2508,6 @@ void Assembler::movd(Register dst, XMMRegister src) {
void Assembler::movq(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2695,7 +2518,6 @@ void Assembler::movq(XMMRegister dst, Register src) {
void Assembler::movq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(src, dst);
emit(0x0F);
@@ -2704,10 +2526,26 @@ void Assembler::movq(Register dst, XMMRegister src) {
}
+void Assembler::movq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (dst.low_bits() == 4) {
+ // Avoid unnecessary SIB byte.
+ emit(0xf3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x7e);
+ emit_sse_operand(dst, src);
+ } else {
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0xD6);
+ emit_sse_operand(src, dst);
+ }
+}
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(src, dst);
emit(0x0F);
@@ -2717,9 +2555,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2731,7 +2567,6 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2744,7 +2579,6 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(src, dst);
emit(0x0F);
@@ -2755,7 +2589,6 @@ void Assembler::movsd(const Operand& dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2766,7 +2599,6 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2775,9 +2607,44 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
}
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (src.low_bits() == 4) {
+ // Try to avoid an unnecessary SIB byte.
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x29);
+ emit_sse_operand(src, dst);
+ } else {
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x28);
+ emit_sse_operand(dst, src);
+ }
+}
+
+
+void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (src.low_bits() == 4) {
+ // Try to avoid an unnecessary SIB byte.
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x29);
+ emit_sse_operand(src, dst);
+ } else {
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x28);
+ emit_sse_operand(dst, src);
+ }
+}
+
+
void Assembler::movss(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3); // single
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2788,7 +2655,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(const Operand& src, XMMRegister dst) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3); // single
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2799,7 +2665,6 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2810,7 +2675,6 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttss2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2821,7 +2685,6 @@ void Assembler::cvttss2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2832,7 +2695,6 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2843,7 +2705,6 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2854,7 +2715,6 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2865,7 +2725,6 @@ void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2876,7 +2735,6 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2887,7 +2745,6 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2898,7 +2755,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2909,7 +2765,6 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2920,7 +2775,6 @@ void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2931,7 +2785,6 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2942,7 +2795,6 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
@@ -2953,7 +2805,6 @@ void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2964,7 +2815,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2975,7 +2825,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2986,7 +2835,6 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2997,7 +2845,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3008,7 +2855,6 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3019,7 +2865,6 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3028,9 +2873,17 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3041,7 +2894,6 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -3052,7 +2904,6 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -3061,9 +2912,23 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::roundsd(XMMRegister dst, XMMRegister src,
+ Assembler::RoundingMode mode) {
+ ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x3a);
+ emit(0x0b);
+ emit_sse_operand(dst, src);
+  // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+
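For reference, the immediate byte that roundsd emits combines the two rounding-control bits with 0x8, the bit that masks the precision (inexact) exception. The stand-alone sketch below is not V8 code; it builds the same immediate with SSE4.1 compiler intrinsics (the name RoundDownSketch and the use of <smmintrin.h> are illustrative assumptions), so the mapping from kRoundDown | 0x8 to _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC is easy to verify.

#include <cstdio>
#include <smmintrin.h>  // SSE4.1 intrinsics; compile with -msse4.1 or equivalent

double RoundDownSketch(double x) {
  __m128d v = _mm_set_sd(x);
  // Same immediate the assembler builds: rounding mode 0x1 (round down)
  // OR'ed with 0x8 to mask the precision exception.
  v = _mm_round_sd(v, v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
  return _mm_cvtsd_f64(v);
}

int main() {
  std::printf("%g %g\n", RoundDownSketch(2.7), RoundDownSketch(-2.7));  // 2 -3
  return 0;
}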
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -3114,7 +2979,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Serializer::TooLateToEnableNow();
}
#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
+ if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index f6cd570936..2971db845e 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -93,8 +93,8 @@ struct Register {
// rbp - frame pointer
// rsi - context register
// r10 - fixed scratch register
+ // r12 - smi constant register
// r13 - root register
- // r15 - smi constant register
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 10;
@@ -120,7 +120,7 @@ struct Register {
"r9",
"r11",
"r14",
- "r12"
+ "r15"
};
return names[index];
}
@@ -327,22 +327,6 @@ inline Condition ReverseCondition(Condition cc) {
}
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -395,6 +379,13 @@ class Operand BASE_EMBEDDED {
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
+ // Queries related to the size of the generated instruction.
+ // Whether the generated instruction will have a REX prefix.
+ bool requires_rex() const { return rex_ != 0; }
+ // Size of the ModR/M, SIB and displacement parts of the generated
+ // instruction.
+ int operand_size() const { return len_; }
+
private:
byte rex_;
byte buf_[6];
@@ -431,9 +422,11 @@ class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- static void Probe(bool portable);
+ static void Probe();
+
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -441,40 +434,71 @@ class CpuFeatures : public AllStatic {
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
+
+#ifdef DEBUG
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
- return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ uint64_t enabled = isolate->enabled_cpu_features();
+ return (enabled & (V8_UINT64_C(1) << f)) != 0;
}
+#endif
+
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f) {
- uint64_t mask = (V8_UINT64_C(1) << f);
+ uint64_t mask = V8_UINT64_C(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
- old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= mask;
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = isolate_->enabled_cpu_features();
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
}
- ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
+ Isolate* isolate_;
uint64_t old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
+
private:
  // Safe defaults include SSE2 and CMOV for X64. These are always available
  // if anyone checks, but they shouldn't need to check.
+ // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+ // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
static uint64_t supported_;
- static uint64_t enabled_;
static uint64_t found_by_runtime_probing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
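The Scope class above is a plain RAII guard: it ORs the feature bit into the isolate's enabled-feature mask on construction and restores the previous mask on destruction. A minimal stand-alone sketch of the same pattern, with a hypothetical FeatureContext standing in for the Isolate:

#include <cstdint>

struct FeatureContext {          // hypothetical stand-in for the Isolate
  uint64_t enabled_features = 0;
};

class FeatureScope {
 public:
  FeatureScope(FeatureContext* context, int feature)
      : context_(context), old_enabled_(context->enabled_features) {
    // Enable the feature for the lifetime of the scope.
    context_->enabled_features |= uint64_t{1} << feature;
  }
  ~FeatureScope() {
    // Restore whatever was enabled before the scope was entered.
    context_->enabled_features = old_enabled_;
  }
 private:
  FeatureContext* context_;
  uint64_t old_enabled_;
};

Keeping the saved mask inside the guard, rather than in a static as the removed code did, is what lets the scheme work with multiple isolates.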
-class Assembler : public Malloced {
+class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
// space to write an instruction and its relocation information.
@@ -501,9 +525,12 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -613,6 +640,7 @@ class Assembler : public Malloced {
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
+ void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -649,7 +677,7 @@ class Assembler : public Malloced {
// Move sign extended immediate to memory location.
void movq(const Operand& dst, Immediate value);
- // New x64 instructions to load a 64-bit immediate into a register.
+ // Instructions to load a 64-bit immediate into a register.
// All 64-bit immediates must have a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
@@ -674,7 +702,7 @@ class Assembler : public Malloced {
void repmovsl();
void repmovsq();
- // New x64 instruction to load from an immediate 64-bit pointer into RAX.
+ // Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -1109,6 +1137,7 @@ class Assembler : public Malloced {
// Miscellaneous
void clc();
+ void cld();
void cpuid();
void hlt();
void int3();
@@ -1134,12 +1163,13 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
- void bind(NearLabel* L);
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
- void call(Handle<Code> target, RelocInfo::Mode rmode);
+ void call(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
@@ -1156,7 +1186,8 @@ class Assembler : public Malloced {
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
- void jmp(Label* L); // unconditional jump to L
+ // Unconditional jump to L
+ void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(Handle<Code> target, RelocInfo::Mode rmode);
// Jump near absolute indirect (r64)
@@ -1165,16 +1196,12 @@ class Assembler : public Malloced {
// Jump near absolute indirect (m64)
void jmp(const Operand& src);
- // Short jump
- void jmp(NearLabel* L);
-
// Conditional jumps
- void j(Condition cc, Label* L);
+ void j(Condition cc,
+ Label* L,
+ Label::Distance distance = Label::kFar);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
- // Conditional short jump
- void j(Condition cc, NearLabel* L, Hint hint = no_hint);
-
// Floating-point operations
void fld(int i);
@@ -1247,15 +1274,24 @@ class Assembler : public Malloced {
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
+ void movq(XMMRegister dst, XMMRegister src);
void extractps(Register dst, XMMRegister src, byte imm8);
- void movsd(const Operand& dst, XMMRegister src);
+ // Don't use this unless it's important to keep the
+ // top half of the destination register unchanged.
+  // Use movaps when moving double values and movq for integer
+ // values in xmm registers.
void movsd(XMMRegister dst, XMMRegister src);
+
+ void movsd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
+ void movapd(XMMRegister dst, XMMRegister src);
+ void movaps(XMMRegister dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@@ -1287,11 +1323,21 @@ class Assembler : public Malloced {
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ enum RoundingMode {
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+ };
+
+ void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void movmskpd(Register dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
@@ -1343,6 +1389,9 @@ class Assembler : public Malloced {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
+ protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
@@ -1361,7 +1410,9 @@ class Assembler : public Malloced {
inline void emitl(uint32_t x);
inline void emitq(uint64_t x, RelocInfo::Mode rmode);
inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
+ inline void emit_code_target(Handle<Code> target,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
void emit(Immediate x) { emitl(x.value_); }
// Emits a REX prefix that encodes a 64-bit operand size and
@@ -1536,18 +1587,17 @@ class Assembler : public Malloced {
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
- static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
List< Handle<Code> > code_targets_;
- // push-pop elimination
- byte* last_pc_;
PositionsRecorder positions_recorder_;
+
+ bool emit_debug_code_;
+
friend class PositionsRecorder;
};
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index a2dd6cd42a..076398906c 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
__ addq(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id), 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
@@ -96,9 +96,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rax: number of arguments
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing rax).
- __ movq(rbx, Immediate(0));
+ __ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -127,7 +128,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address();
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
__ movq(kScratchRegister, debug_step_in_fp);
__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
@@ -339,14 +340,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
if (is_api_function) {
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code = Handle<Code>(
- Builtins::builtin(Builtins::HandleApiCallConstruct));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+ CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Restore context from the frame.
@@ -360,8 +362,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ JumpIfSmi(rax, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
@@ -379,7 +382,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ push(rcx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
__ ret(0);
}
@@ -492,12 +496,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
// Expects rdi to hold function pointer.
- __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the JS frame. Notice that this also removes the empty
@@ -525,17 +530,23 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Push a copy of the function onto the stack.
__ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
__ push(rdi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyCompile, 1);
+
+ // Restore call kind information.
+ __ pop(rcx);
+  // Restore function.
__ pop(rdi);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
}
@@ -545,17 +556,23 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Push a copy of the function onto the stack.
__ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
__ push(rdi); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kLazyRecompile, 1);
- // Restore function and tear down temporary frame.
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore function.
__ pop(rdi);
+
+ // Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
}
@@ -575,15 +592,15 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
- NearLabel not_no_registers, not_tos_rax;
+ Label not_no_registers, not_tos_rax;
__ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
- __ j(not_equal, &not_no_registers);
+ __ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
__ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
- __ j(not_equal, &not_tos_rax);
+ __ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
__ bind(&not_tos_rax);
@@ -630,7 +647,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testq(rax, rax);
__ j(not_zero, &done);
__ pop(rbx);
- __ Push(Factory::undefined_value());
+ __ Push(FACTORY->undefined_value());
__ push(rbx);
__ incq(rax);
__ bind(&done);
@@ -657,19 +674,24 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &shift_arguments);
+ // Do not transform the receiver for natives.
+ // SharedFunctionInfo is already loaded into rbx.
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_zero, &shift_arguments);
+
// Compute the receiver in non-strict mode.
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ JumpIfSmi(rbx, &convert_to_object);
+ __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &convert_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &shift_arguments);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
__ EnterInternalFrame(); // In order to preserve argument count.
@@ -685,7 +707,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ LeaveInternalFrame();
// Restore the function to rdi.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ jmp(&patch_receiver);
+ __ jmp(&patch_receiver, Label::kNear);
// Use the global receiver object from the called function as the
// receiver.
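The receiver handling in this hunk follows one rule for non-strict, non-native callees: null and undefined become the global receiver, other primitives are boxed through Builtins::TO_OBJECT, and anything that is already a spec object passes through untouched. A hedged sketch of that decision table (the enum and function names are invented for illustration; the real code works on tagged values in registers):

enum class ReceiverKind { kNullOrUndefined, kPrimitive, kSpecObject };

// Hypothetical, simplified model of the receiver transformation.
ReceiverKind TransformReceiverSketch(ReceiverKind receiver,
                                     bool strict_mode,
                                     bool is_native) {
  if (strict_mode || is_native) return receiver;     // no transformation at all
  switch (receiver) {
    case ReceiverKind::kNullOrUndefined:
      return ReceiverKind::kSpecObject;              // replaced by the global receiver
    case ReceiverKind::kPrimitive:
      return ReceiverKind::kSpecObject;              // boxed via TO_OBJECT
    case ReceiverKind::kSpecObject:
      return receiver;                               // already an object
  }
  return receiver;
}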
@@ -733,7 +755,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &function);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ __ SetCallKind(rcx, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
}
@@ -746,13 +769,15 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ SetCallKind(rcx, CALL_AS_METHOD);
__ cmpq(rax, rbx);
__ j(not_equal,
- Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+ __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -821,8 +846,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &push_receiver);
+ // Do not transform the receiver for natives.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &push_receiver);
+
// Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object);
+ __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -830,17 +860,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &call_to_object);
- __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, &push_receiver);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);
__ push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movq(rbx, rax);
- __ jmp(&push_receiver);
+ __ jmp(&push_receiver, Label::kNear);
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
@@ -863,7 +892,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Initialize();
__ Call(ic, RelocInfo::CODE_TARGET);
// It is important that we do not have a test instruction after the
// call. A test instruction after the call is used to indicate that
@@ -886,7 +916,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ParameterCount actual(rax);
__ SmiToInteger32(rax, rax);
__ movq(rdi, Operand(rbp, kFunctionOffset));
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove function, receiver, and arguments
@@ -935,7 +966,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch2: start of next object
__ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
__ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
@@ -943,7 +974,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// fixed array.
if (initial_capacity == 0) {
__ Move(FieldOperand(result, JSArray::kElementsOffset),
- Factory::empty_fixed_array());
+ FACTORY->empty_fixed_array());
return;
}
@@ -960,7 +991,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// scratch1: elements array
// scratch2: start of next object
__ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
__ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
Smi::FromInt(initial_capacity));
@@ -968,7 +999,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
static const int kLoopUnfoldLimit = 4;
ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
- __ Move(scratch3, Factory::the_hole_value());
+ __ Move(scratch3, FACTORY->the_hole_value());
if (initial_capacity <= kLoopUnfoldLimit) {
// Use a scratch register here to have only one reloc info when unfolding
// the loop.
@@ -1052,7 +1083,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, Factory::empty_fixed_array());
+ __ Move(elements_array, FACTORY->empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
__ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
@@ -1071,7 +1102,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- Factory::fixed_array_map());
+ FACTORY->fixed_array_map());
Label not_empty_2, fill_array;
__ SmiTest(array_size);
__ j(not_zero, &not_empty_2);
@@ -1092,7 +1123,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
- __ Move(scratch, Factory::the_hole_value());
+ __ Move(scratch, FACTORY->the_hole_value());
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
@@ -1137,7 +1168,8 @@ static void ArrayNativeCode(MacroAssembler* masm,
r8,
kPreallocatedArrayElements,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(kPointerSize);
@@ -1168,7 +1200,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r9,
true,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(counters->array_function_native(), 1);
__ movq(rax, rbx);
__ ret(2 * kPointerSize);
@@ -1190,7 +1222,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r9,
false,
call_generic_code);
- __ IncrementCounter(&Counters::array_function_native, 1);
+ __ IncrementCounter(counters->array_function_native(), 1);
// rax: argc
// rbx: JSArray
@@ -1248,7 +1280,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
- // Initial map for the builtin Array function shoud be a map.
+    // Initial maps for the builtin Array functions should be maps.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
ASSERT(kSmiTag == 0);
@@ -1264,8 +1296,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
- Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
- Handle<Code> array_code(code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -1280,11 +1312,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
Label generic_constructor;
if (FLAG_debug_code) {
- // The array construct code is only set for the builtin Array function which
- // does always have a map.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rbx);
- __ cmpq(rdi, rbx);
- __ Check(equal, "Unexpected Array function");
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
@@ -1301,8 +1330,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
- Handle<Code> generic_construct_stub(code);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
@@ -1324,11 +1353,11 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Push the function on the stack.
__ push(rdi);
- // Preserve the number of arguments on the stack. Must preserve both
- // rax and rbx because these registers are used when copying the
+ // Preserve the number of arguments on the stack. Must preserve rax,
+ // rbx and rcx because these registers are used when copying the
// arguments and the receiver.
- __ Integer32ToSmi(rcx, rax);
- __ push(rcx);
+ __ Integer32ToSmi(r8, rax);
+ __ push(r8);
}
@@ -1352,11 +1381,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
+ // -- rcx : call kind information
// -- rdx : code entry to call
// -----------------------------------
Label invoke, dont_adapt_arguments;
- __ IncrementCounter(&Counters::arguments_adaptors, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->arguments_adaptors(), 1);
Label enough, too_few;
__ cmpq(rax, rbx);
@@ -1371,14 +1402,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
+ __ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(rcx);
+ __ incq(r8);
__ push(Operand(rax, 0));
__ subq(rax, Immediate(kPointerSize));
- __ cmpq(rcx, rbx);
+ __ cmpq(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1390,23 +1421,23 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ movq(rcx, Immediate(-1)); // account for receiver
+ __ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(rcx);
+ __ incq(r8);
__ push(Operand(rdi, 0));
__ subq(rdi, Immediate(kPointerSize));
- __ cmpq(rcx, rax);
+ __ cmpq(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(rcx);
+ __ incq(r8);
__ push(kScratchRegister);
- __ cmpq(rcx, rbx);
+ __ cmpq(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
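At the data level the adaptor trampoline does exactly what this hunk's copy and fill loops suggest: copy the receiver and the actual arguments, then pad with undefined until the expected count is reached (the switch from rcx to r8 only frees rcx for the call-kind information). A stand-alone sketch under that reading, using std::string as a stand-in for tagged values:

#include <string>
#include <vector>

// Hypothetical model: "actual" already includes the receiver, as on the stack.
std::vector<std::string> AdaptArgumentsSketch(
    const std::vector<std::string>& actual, size_t expected_plus_receiver) {
  std::vector<std::string> adapted;
  // Copy the receiver and as many actual arguments as the callee expects.
  for (size_t i = 0; i < actual.size() && i < expected_plus_receiver; ++i) {
    adapted.push_back(actual[i]);
  }
  // Fill any remaining expected slots with undefined.
  while (adapted.size() < expected_plus_receiver) {
    adapted.push_back("undefined");
  }
  return adapted;
}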
@@ -1455,17 +1486,17 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
- NearLabel skip;
+ Label skip;
__ SmiCompare(rax, Smi::FromInt(-1));
- __ j(not_equal, &skip);
+ __ j(not_equal, &skip, Label::kNear);
__ ret(0);
// If we decide not to perform on-stack replacement we perform a
// stack guard check to enable interrupts.
__ bind(&stack_check);
- NearLabel ok;
+ Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 9da22bd675..81514d1e92 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -40,15 +40,15 @@ namespace internal {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
- NearLabel check_heap_number, call_builtin;
+ Label check_heap_number, call_builtin;
__ SmiTest(rax);
- __ j(not_zero, &check_heap_number);
+ __ j(not_zero, &check_heap_number, Label::kNear);
__ Ret();
__ bind(&check_heap_number);
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &call_builtin);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_builtin, Label::kNear);
__ Ret();
__ bind(&call_builtin);
@@ -68,11 +68,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
__ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
// Initialize the rest of the function. We don't have to update the
@@ -104,7 +108,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ pop(rdx);
__ push(rsi);
__ push(rdx);
- __ Push(Factory::false_value());
+ __ PushRoot(Heap::kFalseValueRootIndex);
__ push(rcx); // Restore return address.
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
@@ -121,18 +125,17 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
// Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL.
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
__ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
@@ -148,7 +151,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -227,212 +230,71 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- NearLabel false_result, true_result, not_string;
+ Label false_result, true_result, not_string;
+ const Register map = rdx;
+
__ movq(rax, Operand(rsp, 1 * kPointerSize));
- // 'null' => false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ // undefined -> false
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &false_result);
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+ // Boolean -> its value
+ __ CompareRoot(rax, Heap::kFalseValueRootIndex);
+ __ j(equal, &false_result);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(equal, &true_result);
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result);
+ // Smis: 0 -> false, all other -> true
+ __ Cmp(rax, Smi::FromInt(0));
+ __ j(equal, &false_result);
+ __ JumpIfSmi(rax, &true_result);
+
+ // 'null' -> false.
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result, Label::kNear);
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(above_equal, &true_result);
+ // Get the map of the heap object.
+ __ movq(map, FieldOperand(rax, HeapObject::kMapOffset));
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
- __ j(above_equal, &not_string);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
- __ j(zero, &false_result);
- __ jmp(&true_result);
+ // Undetectable -> false.
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result, Label::kNear);
+
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &true_result, Label::kNear);
+
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string, Label::kNear);
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), Immediate(0));
+ __ j(zero, &false_result, Label::kNear);
+ __ jmp(&true_result, Label::kNear);
__ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result);
- // HeapNumber => false iff +0, -0, or NaN.
+ // HeapNumber -> false iff +0, -0, or NaN.
// These three cases set the zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result, Label::kNear);
+ __ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result);
+ __ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in rax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ movq(rax, Immediate(1));
+ __ Set(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ Set(rax, 0);
+ __ Set(tos_, 0);
__ ret(1 * kPointerSize);
}
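Read top to bottom, the rewritten stub encodes the usual ECMAScript truthiness table: undefined, false, Smi 0, null, undetectable objects, empty strings, and ±0/NaN heap numbers are false; booleans report their own value, and every other Smi, object, and string is true. A sketch of the same decision order over a hypothetical tagged-value model (Value and ToBooleanSketch are invented names, not V8 API):

#include <string>

struct Value {                       // hypothetical stand-in for a JS value
  enum Kind { kUndefined, kNull, kBoolean, kSmi, kString, kHeapNumber,
              kObject, kUndetectableObject } kind;
  bool boolean_value = false;
  int smi_value = 0;
  std::string string_value;
  double number_value = 0.0;
};

bool ToBooleanSketch(const Value& v) {
  if (v.kind == Value::kUndefined) return false;          // undefined -> false
  if (v.kind == Value::kBoolean) return v.boolean_value;  // booleans -> own value
  if (v.kind == Value::kSmi) return v.smi_value != 0;     // Smi 0 -> false
  if (v.kind == Value::kNull) return false;               // null -> false
  if (v.kind == Value::kUndetectableObject) return false; // undetectable -> false
  if (v.kind == Value::kObject) return true;              // JS object -> true
  if (v.kind == Value::kString) return !v.string_value.empty();
  // Heap number: false iff +0, -0 or NaN (the cases where ucomisd sets ZF).
  return v.number_value == v.number_value && v.number_value != 0.0;
}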
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
@@ -452,486 +314,198 @@ class FloatingPointHelper : public AllStatic {
// As above, but we know the operands to be numbers. In that case,
// conversion can't fail.
static void LoadNumbersAsIntegers(MacroAssembler* masm);
+
+ // Tries to convert two values to smis losslessly.
+  // This fails if an argument is neither a Smi nor a HeapNumber,
+ // or if it's a HeapNumber with a value that can't be converted
+ // losslessly to a Smi. In that case, control transitions to the
+ // on_not_smis label.
+ // On success, either control goes to the on_success label (if one is
+ // provided), or it falls through at the end of the code (if on_success
+ // is NULL).
+  // On success, both first and second hold Smi-tagged values.
+ // One of first or second must be non-Smi when entering.
+ static void NumbersToSmis(MacroAssembler* masm,
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* on_success,
+ Label* on_not_smis);
};
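The "losslessly" requirement in the comment above is the interesting part: a HeapNumber becomes a Smi only if the round trip through an integer reproduces the exact same double, which also rejects -0.0 and NaN. A minimal sketch of that criterion, assuming 32-bit Smi values for simplicity (DoubleToSmiSketch is an invented name, not a V8 helper):

#include <cmath>
#include <cstdint>
#include <optional>

std::optional<int32_t> DoubleToSmiSketch(double value) {
  // Reject values outside the 32-bit range up front (this also rejects NaN).
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return std::nullopt;
  int32_t as_int = static_cast<int32_t>(value);
  // Lossless only if converting back reproduces the exact same double.
  if (static_cast<double>(as_int) != value) return std::nullopt;
  // -0.0 converts to 0 but is not the same value, so it is not a Smi either.
  if (as_int == 0 && std::signbit(value)) return std::nullopt;
  return as_int;
}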
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+  // Double to remove sign bit, shift exponent down to least significant bits,
+  // and subtract bias to get the unshifted, unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus, Label::kNear);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done, Label::kNear);
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+  // If the exponent is negative or above 83, the number contains no
+  // significant bits in the range 0..2^31, so the result is zero, and the
+  // result register already holds zero.
+ __ j(above, &done, Label::kNear);
+
+  // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+  // If the value is negative, compute (low bits - 1) ^ -1, i.e. negate the
+  // low bits; otherwise compute (low bits - 0) ^ 0, leaving them unchanged.
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
} else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
}
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
+ __ bind(&done);
+}
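The new IntegerConvert works purely from the IEEE-754 exponent: exponents 0..62 go through cvttsd2siq, exponents 63..83 shift the stored mantissa so that only the bits below 2^32 survive, and everything else (negative exponents, huge values, NaN, infinities) yields zero. A stand-alone sketch that computes the same low 32 bits with ordinary bit manipulation (IntegerConvertSketch is an invented name; it assumes the usual little-endian IEEE-754 double layout):

#include <cstdint>
#include <cstdio>
#include <cstring>

uint32_t IntegerConvertSketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));                 // raw IEEE-754 bits
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0 || exponent > 83) return 0;              // no bits in 2^0..2^31
  if (exponent <= 62) {
    // Small enough for a signed 64-bit truncation, like cvttsd2siq.
    return static_cast<uint32_t>(static_cast<int64_t>(value));
  }
  // Exponent 63..83: only the low mantissa bits land below 2^32.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t magnitude = static_cast<uint32_t>(mantissa << (exponent - 52));
  return (bits >> 63) ? 0u - magnitude : magnitude;         // apply the sign
}

int main() {
  std::printf("%u\n", IntegerConvertSketch(3.9));    // 3
  std::printf("%u\n", IntegerConvertSketch(-3.9));   // 4294967293, i.e. -3 as uint32
  return 0;
}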
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case UnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
break;
-
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
+ case UnaryOpIC::SMI:
+ GenerateSmiStub(masm);
break;
-
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
+ case UnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
break;
-
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
+ case UnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
break;
+ }
+}
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(rcx); // Save return address.
+ __ push(rax);
+  // The single argument is now on top of the stack.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(MinorKey()));
+ __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(operand_type_));
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
+ __ push(rcx); // Push return address.
- default:
- UNREACHABLE();
- break;
- }
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
switch (op_) {
- case Token::ADD:
case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
- }
-
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
-
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
+ GenerateSmiStubSub(masm);
break;
-
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
break;
-
default:
- break;
+ UNREACHABLE();
}
}
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label slow;
+ GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow,
+ Label::Distance non_smi_near,
+ Label::Distance slow_near) {
+ Label done;
+ __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+ __ SmiNeg(rax, rax, &done, Label::kNear);
+ __ jmp(slow, slow_near);
+ __ bind(&done);
+ __ ret(0);
+}
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
- // If all else fails, use the runtime system to get the correct
- // result. If arguments was passed in registers now place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi,
+ Label::Distance non_smi_near) {
+ __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+ __ SmiNot(rax, rax);
+ __ ret(0);
+}
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
-
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- Condition is_smi;
- is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
-
- // First argument is a string, test second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ GenerateHeapNumberStubSub(masm);
break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -939,83 +513,163 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, slow, call_builtin;
+ GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+ __ bind(&call_builtin);
+ GenerateGenericCodeFallback(masm);
}
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ // Check if the operand is a heap number.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, slow);
+
+ // Operand is a float, negate its value by flipping the sign bit.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ Set(kScratchRegister, 0x01);
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
} else {
- __ push(rdx);
+ // Allocate a heap number before calculating the answer,
+ // so we don't have an untagged double around during GC.
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
__ push(rax);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rcx, rax);
+ __ pop(rax);
+ __ LeaveInternalFrame();
+ __ bind(&heapnumber_allocated);
+ // rcx: allocated 'empty' number
+
+ // Copy the double value to the new heap number, flipping the sign.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Set(kScratchRegister, 0x01);
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
}
- __ push(rcx);
+ __ ret(0);
}
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+ Label* slow) {
+ // Check if the operand is a heap number.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, slow);
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, rax);
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
+}
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
- __ Push(Smi::FromInt(runtime_operands_type_));
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
- __ push(rcx); // The return address.
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
- TRBinaryOpIC::TypeInfo type_info,
- TRBinaryOpIC::TypeInfo result_type_info) {
- TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
+const char* UnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
+ return name_;
}
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
@@ -1031,35 +685,39 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
5,
1);
}
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
- case TRBinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
- case TRBinaryOpIC::SMI:
+ case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case TRBinaryOpIC::INT32:
+ case BinaryOpIC::INT32:
UNREACHABLE();
// The int32 case is identical to the Smi case. We avoid creating this
// ic state on x64.
break;
- case TRBinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
- case TRBinaryOpIC::ODDBALL:
+ case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
- case TRBinaryOpIC::STRING:
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
- case TRBinaryOpIC::GENERIC:
+ case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
@@ -1068,10 +726,11 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
}
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -1083,41 +742,43 @@ const char* TypeRecordingBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "TypeRecordingBinaryOpStub_%s_%s_%s",
+ "BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
return name_;
}
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+ MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ // Arguments to BinaryOpStub are in rdx and rax.
+ Register left = rdx;
+ Register right = rax;
+
// We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations.
+ // for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
(op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV);
-
- // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
- Register left = rdx;
- Register right = rax;
-
+ op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
Label not_smis;
Label use_fp_on_smis;
- Label restore_MOD_registers; // Only used if op_ == Token::MOD.
+ Label fail;
if (op_ != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
+ Label smi_values;
+ __ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
switch (op_) {
@@ -1156,9 +817,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
case Token::BIT_OR: {
ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
+ __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
break;
}
case Token::BIT_XOR:
@@ -1182,7 +841,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
break;
case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &not_smis);
+ __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
__ movq(rax, left);
break;
@@ -1193,48 +852,58 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
// 5. Emit return of result in rax. Some operations have registers pushed.
__ ret(0);
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
+ if (use_fp_on_smis.is_linked()) {
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ // Restore left and right to rdx and rax.
+ __ movq(rdx, rcx);
+ __ movq(rax, rbx);
+ }
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
+ if (generate_inline_heapnumber_results) {
+ __ AllocateHeapNumber(rcx, rbx, slow);
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ if (op_ == Token::SHR) {
+ __ SmiToInteger32(left, left);
+ __ cvtqsi2sd(xmm0, left);
+ } else {
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ }
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else {
+ __ jmp(&fail);
}
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
}
// 7. Non-smi operands reach the end of the code generated by
// GenerateSmiCode, and fall through to subsequent code,
// with the operands in rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
+ // But first we check if non-smi values are HeapNumbers holding
+ // values that could be smi.
__ bind(&not_smis);
- if (op_ == Token::BIT_OR) {
- __ movq(right, rcx);
- }
+ Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
+ &smi_values, &fail);
+ __ jmp(&smi_values);
+ __ bind(&fail);
}
-void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
- MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
+void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure) {
switch (op_) {
case Token::ADD:
case Token::SUB:
@@ -1303,7 +972,7 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
// already loaded heap_number_map.
__ AllocateInNewSpace(HeapNumber::kSize,
rax,
- rcx,
+ rdx,
no_reg,
&allocation_failed,
TAG_OBJECT);
@@ -1323,7 +992,7 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
// We need tagged values in rdx and rax for the following code,
// not int32 in rax and rcx.
__ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rax);
+ __ Integer32ToSmi(rdx, rbx);
__ jmp(allocation_failure);
}
break;
@@ -1333,32 +1002,32 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+ "BinaryOpStub::GenerateFloatingPointCode.");
}
}
-void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
- NearLabel left_not_string, call_runtime;
+ Label left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = rdx;
Register right = rax;
// Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string);
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string);
+ __ j(above_equal, &left_not_string, Label::kNear);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
__ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
+ __ j(above_equal, &call_runtime, Label::kNear);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
@@ -1369,7 +1038,7 @@ void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
switch (op_) {
case Token::ADD:
@@ -1411,27 +1080,70 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smi;
-
- GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label call_runtime;
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ }
- __ bind(&not_smi);
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
GenerateTypeTransition(masm);
+
+ if (call_runtime.is_linked()) {
+ __ bind(&call_runtime);
+ GenerateCallRuntimeCode(masm);
+ }
}
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
GenerateStringAddCode(masm);
// Try to add arguments as strings, otherwise, transition to the generic
- // TRBinaryOpIC type.
+ // BinaryOpIC type.
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
GenerateTypeTransition(masm);
}
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
Label call_runtime;
if (op_ == Token::ADD) {
@@ -1441,18 +1153,18 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
// Convert oddball arguments to numbers.
- NearLabel check, done;
+ Label check, done;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check);
+ __ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(rdx, rdx);
} else {
__ LoadRoot(rdx, Heap::kNanValueRootIndex);
}
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&check);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
__ xor_(rax, rax);
} else {
@@ -1464,7 +1176,7 @@ void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
GenerateFloatingPointCode(masm, &gc_required, &not_number);
@@ -1476,7 +1188,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -1493,9 +1205,8 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
Label skip_allocation;
OverwriteMode mode = mode_;
switch (mode) {
@@ -1533,7 +1244,7 @@ void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
}
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
__ pop(rcx);
__ push(rdx);
__ push(rax);
@@ -1560,11 +1271,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label skip_cache;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
- NearLabel input_not_smi;
- NearLabel loaded;
+ Label input_not_smi, loaded;
// Test that rax is a number.
__ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
+ __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
@@ -1575,7 +1285,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ movq(rdx, xmm1);
__ fld_d(Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded);
+ __ jmp(&loaded, Label::kNear);
__ bind(&input_not_smi);
// Check if input is a HeapNumber.
@@ -1614,15 +1324,18 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ xorl(rcx, rdx);
__ xorl(rax, rdi);
__ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
// ST[0] == double value.
// rbx = bits of double value.
// rcx = TranscendentalCache::hash(double value).
- __ movq(rax, ExternalReference::transcendental_cache_array_address());
- // rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ ExternalReference cache_array =
+ ExternalReference::transcendental_cache_array_address(masm->isolate());
+ __ movq(rax, cache_array);
+ int cache_array_index =
+ type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
+ __ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax);
@@ -1630,7 +1343,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
+ TranscendentalCache::SubCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
@@ -1647,9 +1360,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ addl(rcx, rcx);
__ lea(rcx, Operand(rax, rcx, times_8, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
- NearLabel cache_miss;
+ Label cache_miss;
__ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
+ __ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
__ movq(rax, Operand(rcx, 2 * kIntSize));
if (tagged) {
@@ -1703,7 +1416,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ fstp(0);
__ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ __ TailCallExternalReference(
+ ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
} else { // UNTAGGED.
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
@@ -1756,8 +1470,8 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ j(below, &in_range);
// Check for infinity and NaN. Both return NaN for sin.
__ cmpl(rdi, Immediate(0x7ff));
- NearLabel non_nan_result;
- __ j(not_equal, &non_nan_result);
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
__ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
@@ -1785,7 +1499,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Compute st(0) % st(1)
{
- NearLabel partial_remainder_loop;
+ Label partial_remainder_loop;
__ bind(&partial_remainder_loop);
__ fprem1();
__ fwait();
@@ -1822,74 +1536,6 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
}
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- NearLabel done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done);
-
- // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
-
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
@@ -1937,7 +1583,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(r8, Immediate(0));
+ __ Set(r8, 0);
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
@@ -1957,7 +1603,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&check_undefined_arg2);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
+ __ Set(rcx, 0);
__ jmp(&done);
__ bind(&arg2_is_object);
@@ -2032,87 +1678,57 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
}
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
- __ SmiNeg(rax, rax, &done);
- __ jmp(&slow); // zero, if not handled above, and Smi::kMinValue.
-
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(rax);
- }
-
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- // Try floating point case.
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ AbortIfSmi(rax);
- }
-
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* on_success,
+ Label* on_not_smis) {
+ Register heap_number_map = scratch3;
+ Register smi_result = scratch1;
+ Label done;
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label first_smi;
+ __ JumpIfSmi(first, &first_smi, Label::kNear);
+ __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, on_not_smis);
+ // Convert HeapNumber to smi if possible.
+ __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+ __ movq(scratch2, xmm0);
+ __ cvttsd2siq(smi_result, xmm0);
+ // Check if conversion was successful by converting back and
+ // comparing to the original double's bits.
+ __ cvtlsi2sd(xmm1, smi_result);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(scratch2, kScratchRegister);
+ __ j(not_equal, on_not_smis);
+ __ Integer32ToSmi(first, smi_result);
+
+ __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
+ __ bind(&first_smi);
+ if (FLAG_debug_code) {
+ // Second should be non-smi if we get here.
+ __ AbortIfSmi(second);
}
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
+ __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, on_not_smis);
+ // Convert second to smi, if possible.
+ __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+ __ movq(scratch2, xmm0);
+ __ cvttsd2siq(smi_result, xmm0);
+ __ cvtlsi2sd(xmm1, smi_result);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(scratch2, kScratchRegister);
+ __ j(not_equal, on_not_smis);
+ __ Integer32ToSmi(second, smi_result);
+ if (on_success != NULL) {
+ __ jmp(on_success);
+ } else {
+ __ bind(&done);
}
}
@@ -2130,7 +1746,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// Save 1 in xmm3 - we need this several times later on.
- __ movl(rcx, Immediate(1));
+ __ Set(rcx, 1);
__ cvtlsi2sd(xmm3, rcx);
Label exponent_nonsmi;
@@ -2162,20 +1778,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movq(rdx, rax);
// Get absolute value of exponent.
- NearLabel no_neg;
+ Label no_neg;
__ cmpl(rax, Immediate(0));
- __ j(greater_equal, &no_neg);
+ __ j(greater_equal, &no_neg, Label::kNear);
__ negl(rax);
__ bind(&no_neg);
// Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- NearLabel while_true;
- NearLabel no_multiply;
+ __ movaps(xmm1, xmm3);
+ Label while_true;
+ Label no_multiply;
__ bind(&while_true);
__ shrl(rax, Immediate(1));
- __ j(not_carry, &no_multiply);
+ __ j(not_carry, &no_multiply, Label::kNear);
__ mulsd(xmm1, xmm0);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
@@ -2187,8 +1803,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ xorpd(xmm0, xmm0);
+ __ movaps(xmm1, xmm3);
+ __ xorps(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
@@ -2205,12 +1821,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
- NearLabel base_not_smi;
- NearLabel handle_special_cases;
- __ JumpIfNotSmi(rdx, &base_not_smi);
+ Label base_not_smi, handle_special_cases;
+ __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
- __ jmp(&handle_special_cases);
+ __ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
@@ -2225,22 +1840,22 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
- NearLabel not_minus_half;
+ Label not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
__ movq(xmm2, rcx);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
+ __ j(not_equal, &not_minus_half, Label::kNear);
// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
+ __ xorps(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
+ __ movaps(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
@@ -2253,8 +1868,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
+ __ xorps(xmm1, xmm1);
+ __ addsd(xmm1, xmm0); // Convert -0 to 0.
__ sqrtsd(xmm1, xmm1);
__ bind(&allocate_return);
@@ -2280,11 +1895,14 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label slow;
__ JumpIfNotSmi(rdx, &slow);
- // Check if the calling frame is an arguments adaptor frame.
+ // Check if the calling frame is an arguments adaptor frame. We look at the
+ // context offset, and if the frame is not a regular one, then we find a
+ // Smi instead of the context. We can't use SmiCompare here, because that
+ // only works for comparing two smis.
Label adaptor;
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
@@ -2325,103 +1943,340 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // rsp[0] : return address
+ // rsp[8] : number of parameters (tagged)
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+ // Registers used over the whole function:
+ // rbx: the mapped parameter count (untagged)
+ // rax: the allocated object (tagged).
+
+ Factory* factory = masm->isolate()->factory();
+
+ __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ // rbx = parameter count (untagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ movq(rcx, rbx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger64(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // rbx = parameter count (untagged)
+ // rcx = argument count (untagged)
+ // Compute the mapped parameter count = min(rbx, rcx) in rbx.
+ __ cmpq(rbx, rcx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ movq(rbx, rcx);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ testq(rbx, rbx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+
+ // rax = address of new object(s) (tagged)
+ // rcx = argument count (untagged)
+ // Get the arguments boilerplate from the current (global) context into rdi.
+ Label has_mapped_parameters, copy;
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ testq(rbx, rbx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+
+ const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+ __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ __ jmp(&copy, Label::kNear);
+
+ const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
+ __ bind(&has_mapped_parameters);
+ __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ bind(&copy);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // rcx = argument count (untagged)
+ // rdi = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(rdx, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), rdx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ rdx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ // Note: rcx is tagged from here on.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ rcx);
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, rdi will point there, otherwise to the
+ // backing store.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // rcx = argument count (tagged)
+ // rdi = address of parameter map or backing store (tagged)
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ testq(rbx, rbx);
+ __ j(zero, &skip_parameter_map);
+
+ __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ Integer64PlusConstantToSmi(r9, rbx, 2);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+
+ // Load tagged parameter count into r9.
+ __ movq(r9, Operand(rsp, 1 * kPointerSize));
+ __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
+ __ addq(r8, Operand(rsp, 3 * kPointerSize));
+ __ subq(r8, r9);
+ __ Move(r11, factory->the_hole_value());
+ __ movq(rdx, rdi);
+ __ SmiToInteger64(kScratchRegister, r9);
+ __ lea(rdi, Operand(rdi, kScratchRegister,
+ times_pointer_size,
+ kParameterMapHeaderSize));
+ // r9 = loop variable (tagged)
+ // r8 = mapping index (tagged)
+ // r11 = the hole value
+ // rdx = address of parameter map (tagged)
+ // rdi = address of backing store (tagged)
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ SmiSubConstant(r9, r9, Smi::FromInt(1));
+ __ SmiToInteger64(kScratchRegister, r9);
+ __ movq(FieldOperand(rdx, kScratchRegister,
+ times_pointer_size,
+ kParameterMapHeaderSize),
+ r8);
+ __ movq(FieldOperand(rdi, kScratchRegister,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r11);
+ __ SmiAddConstant(r8, r8, Smi::FromInt(1));
+ __ bind(&parameters_test);
+ __ SmiTest(r9);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+
+ __ bind(&skip_parameter_map);
+
+ // rcx = argument count (tagged)
+ // rdi = address of backing store (tagged)
+ // Copy arguments header and remaining slots (if there are any).
+ __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
+ factory->fixed_array_map());
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
+ Label arguments_loop, arguments_test;
+ __ movq(r8, rbx);
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ // Untag rcx and r8 for the loop below.
+ __ SmiToInteger64(rcx, rcx);
+ __ SmiToInteger64(r8, r8);
+ __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subq(rdx, kScratchRegister);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ subq(rdx, Immediate(kPointerSize));
+ __ movq(r9, Operand(rdx, 0));
+ __ movq(FieldOperand(rdi, r8,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r9);
+ __ addq(r8, Immediate(1));
+
+ __ bind(&arguments_test);
+ __ cmpq(r8, rcx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ // rcx = argument count (untagged)
+ __ bind(&runtime);
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ testq(rcx, rcx);
+ __ j(zero, &add_arguments_object, Label::kNear);
+ __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
+ __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
__ movq(rdi, Operand(rdi, offset));
// Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
// Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ rcx);
// If there are no actual arguments, we're done.
Label done;
- __ SmiTest(rcx);
+ __ testq(rcx, rcx);
__ j(zero, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+
__ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+ // Untag the length for the loop below.
+ __ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
+ __ decq(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -2430,7 +2285,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -2459,14 +2314,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kJSRegExpOffset = 4 * kPointerSize;
Label runtime;
-
// Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
__ testq(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
@@ -2477,32 +2331,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rcx);
+ Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
"Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
__ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
}
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
__ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
__ j(not_equal, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// Check that the number of captures fits in the static offsets vector buffer.
__ SmiToInteger32(rdx,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
__ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
__ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
__ j(above, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the second argument is a string.
__ movq(rdi, Operand(rsp, kSubjectOffset));
@@ -2530,7 +2384,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the JSArray is in fast case.
__ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
__ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rdi, Factory::fixed_array_map());
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
@@ -2542,7 +2397,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
- NearLabel seq_ascii_string, seq_two_byte_string, check_code;
+ Label seq_ascii_string, seq_two_byte_string, check_code;
__ movq(rdi, Operand(rsp, kSubjectOffset));
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -2550,10 +2405,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ andb(rbx, Immediate(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
+ __ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be a flat ascii string.
__ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
+ __ j(zero, &seq_ascii_string, Label::kNear);
// Check for flat cons string.
// A flat cons string is a cons string where the second part is the empty
@@ -2565,8 +2420,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
- __ movq(rdx, FieldOperand(rdi, ConsString::kSecondOffset));
- __ Cmp(rdx, Factory::empty_string());
+ __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
__ j(not_equal, &runtime);
__ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -2577,7 +2432,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
+ __ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be ascii.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask));
@@ -2588,7 +2443,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rax: RegExp data (FixedArray)
__ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
__ Set(rcx, 1); // Type is ascii.
- __ jmp(&check_code);
+ __ jmp(&check_code, Label::kNear);
__ bind(&seq_two_byte_string);
// rdi: subject string (flat two-byte)
@@ -2615,15 +2470,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->regexp_entry_native(), 1);
- static const int kRegExpExecuteArguments = 7;
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- __ EnterApiExitFrame(argument_slots_on_stack); // Clobbers rax!
+ __ EnterApiExitFrame(argument_slots_on_stack);
- // Argument 7: Indicate that this is a direct call from JavaScript.
+ // Argument 8: Pass current isolate address.
+ // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ // Immediate(ExternalReference::isolate_address()));
+ __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ kScratchRegister);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
// Argument 6: Start (high end) of backtracking stack memory area.
@@ -2633,14 +2497,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ addq(r9, Operand(kScratchRegister, 0));
// Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
#endif
// Argument 5: static offsets vector buffer.
- __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ __ LoadAddress(r8,
+ ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+ __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
#endif
// First four arguments are passed in registers on both Linux and Windows.
@@ -2664,13 +2529,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4: End of string data
// Argument 3: Start of string data
- NearLabel setup_two_byte, setup_rest;
+ Label setup_two_byte, setup_rest;
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
- __ j(zero, &setup_two_byte);
+ __ j(zero, &setup_two_byte, Label::kNear);
__ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
__ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
__ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
+ __ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
__ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
__ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
@@ -2681,7 +2546,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(arg2, rbx);
// Argument 1: Subject string.
-#ifdef WIN64_
+#ifdef _WIN64
__ movq(arg1, rdi);
#else
// Already there in AMD64 calling convention.
@@ -2695,10 +2560,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ LeaveApiExitFrame();
// Check the result.
- NearLabel success;
+ Label success;
Label exception;
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
+ __ j(equal, &success, Label::kNear);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
__ j(equal, &exception);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
@@ -2741,17 +2606,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
// Get the static offsets vector filled by the native regexp code.
- __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+ __ LoadAddress(rcx,
+ ExternalReference::address_of_static_offsets_vector(isolate));
// rbx: last_match_info backing store (FixedArray)
// rcx: offsets vector
// rdx: number of capture registers
- NearLabel next_capture, done;
+ Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
__ subq(rdx, Immediate(1));
- __ j(negative, &done);
+ __ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
@@ -2773,17 +2639,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(rbx, pending_exception_address);
- __ movq(rax, Operand(rbx, 0));
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address, isolate);
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address, rbx);
+ __ movq(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ cmpq(rax, rdx);
__ j(equal, &runtime);
- __ movq(Operand(rbx, 0), rdx);
+ __ movq(pending_exception_operand, rdx);
__ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- NearLabel termination_exception;
- __ j(equal, &termination_exception);
+ Label termination_exception;
+ __ j(equal, &termination_exception, Label::kNear);
__ Throw(rax);
__ bind(&termination_exception);
@@ -2830,8 +2698,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
// Set empty properties FixedArray.
- __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
- Factory::empty_fixed_array());
+ __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
+ __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
// Set elements to point to FixedArray allocated right after the JSArray.
__ lea(rcx, Operand(rax, JSRegExpResult::kSize));
@@ -2851,13 +2719,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// rbx: Number of elements in array as int32.
// Set map.
- __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
// Set length.
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
// Fill contents of fixed-array with the-hole.
- __ Move(rdx, Factory::the_hole_value());
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
// rax: JSArray.
@@ -2908,9 +2776,13 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Heap::GetNumberStringCache.
Label is_smi;
Label load_result_from_cache;
+ Factory* factory = masm->isolate()->factory();
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+ __ CheckMap(object,
+ factory->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
STATIC_ASSERT(8 == kDoubleSize);
__ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
@@ -2925,8 +2797,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
times_1,
FixedArray::kHeaderSize));
__ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
__ ucomisd(xmm0, xmm1);
@@ -2955,7 +2825,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
index,
times_1,
FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->number_to_string_native(), 1);
}
@@ -2998,6 +2869,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
Label check_unequal_objects, done;
+ Factory* factory = masm->isolate()->factory();
// Compare two smis if required.
if (include_smi_compare_) {
@@ -3025,40 +2897,39 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
- NearLabel not_identical;
+ Label not_identical;
__ cmpq(rax, rdx);
- __ j(not_equal, &not_identical);
+ __ j(not_equal, &not_identical, Label::kNear);
if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
- NearLabel check_for_nan;
+ Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
+ __ j(not_equal, &check_for_nan, Label::kNear);
__ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
__ bind(&check_for_nan);
}
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
// We cannot set rax to EQUAL until just before return because
// rax must be unchanged on jump to not_identical.
-
if (never_nan_nan_ && (cc_ == equal)) {
__ Set(rax, EQUAL);
__ ret(0);
} else {
- NearLabel heap_number;
+ Label heap_number;
// If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical);
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
}
__ Set(rax, EQUAL);
__ ret(0);
@@ -3098,7 +2969,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check if the non-smi operand is a heap number.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ factory->heap_number_map());
// If heap number, handle it in the slow case.
__ j(equal, &slow);
// Return non-equal. ebx (the lower half of rbx) is not zero.
@@ -3113,10 +2984,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- NearLabel first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax (not rax) is not zero)
Label return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0);
@@ -3128,7 +2999,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ CmpInstanceType(rcx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -3143,14 +3014,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
- NearLabel unordered;
+ Label unordered;
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
__ xorl(rax, rax);
__ xorl(rcx, rcx);
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
+ __ j(parity_even, &unordered, Label::kNear);
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
@@ -3190,13 +3061,21 @@ void CompareStub::Generate(MacroAssembler* masm) {
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ if (cc_ == equal) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
rcx,
- rbx,
- rdi,
- r8);
+ rbx);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+ }
#ifdef DEBUG
__ Abort("Unexpected fall-through from string comparison");
@@ -3207,7 +3086,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- NearLabel not_both_objects, return_unequal;
+ Label not_both_objects, return_unequal;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -3215,17 +3094,17 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagMask == 1);
__ lea(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects);
+ __ j(not_zero, &not_both_objects, Label::kNear);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects, Label::kNear);
+ __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects, Label::kNear);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
+ __ j(zero, &return_unequal, Label::kNear);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
__ Set(rax, EQUAL);
@@ -3283,30 +3162,22 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
+ // The receiver might implicitly be the global object. This is
+ // indicated by passing the hole as the receiver to the call
+ // function stub.
+ if (ReceiverMightBeImplicit()) {
+ Label call;
// Get the receiver from the stack.
// +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
__ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
+ // Call as function is indicated with the hole.
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &call, Label::kNear);
+ // Patch the receiver on the stack with the global receiver object.
+ __ movq(rbx, GlobalObjectOperand());
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
+ __ bind(&call);
}
// Get the function to call from the stack.
@@ -3321,7 +3192,23 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ if (ReceiverMightBeImplicit()) {
+ Label call_as_function;
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &call_as_function);
+ __ InvokeFunction(rdi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
+ __ bind(&call_as_function);
+ }
+ __ InvokeFunction(rdi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_FUNCTION);
// Slow-case: Non-function called.
__ bind(&slow);
@@ -3331,11 +3218,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Set(rax, argc_);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ Handle<Code> adaptor =
+ Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
+bool CEntryStub::NeedsImmovableCode() {
+ return false;
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Throw exception in eax.
__ Throw(rax);
@@ -3353,7 +3246,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r12: pointer to the first argument (C callee-saved).
+ // r15: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
@@ -3383,10 +3276,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ incl(Operand(kScratchRegister, 0));
+ Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
+ __ incl(scope_depth_operand);
}
// Call C function.
@@ -3394,30 +3287,33 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r12); // argv.
+ __ movq(StackSpaceOperand(1), r15); // argv.
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
__ lea(rcx, StackSpaceOperand(0));
+ __ LoadAddress(rdx, ExternalReference::isolate_address());
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
__ lea(rdx, StackSpaceOperand(0));
+ __ LoadAddress(r8, ExternalReference::isolate_address());
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
- __ movq(rsi, r12); // argv.
+ __ movq(rsi, r15); // argv.
+ __ movq(rdx, ExternalReference::isolate_address());
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ decl(Operand(kScratchRegister, 0));
+ Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
+ __ decl(scope_depth_operand);
}
// Check for failure result.
@@ -3446,11 +3342,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Handling of failure.
__ bind(&failure_returned);
- NearLabel retry;
+ Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label
STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
+ __ j(zero, &retry, Label::kNear);
// Special handling of out of memory exceptions.
__ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
@@ -3458,12 +3354,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ movq(rax, Operand(kScratchRegister, 0));
- __ movq(rdx, ExternalReference::the_hole_value_location());
- __ movq(rdx, Operand(rdx, 0));
- __ movq(Operand(kScratchRegister, 0), rdx);
+ ExternalReference pending_exception_address(
+ Isolate::k_pending_exception_address, masm->isolate());
+ Operand pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
+ __ movq(rax, pending_exception_operand);
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ movq(pending_exception_operand, rdx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
@@ -3514,7 +3411,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r12: argv pointer (C callee-saved).
+ // r15: argv pointer (C callee-saved).
Label throw_normal_exception;
Label throw_termination_exception;
@@ -3562,54 +3459,64 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
#ifdef ENABLE_LOGGING_AND_PROFILING
Label not_outermost_js, not_outermost_js_2;
#endif
-
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ { // NOLINT. Scope block confuses linter.
+ MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Scratch register is neither callee-save, nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
+ __ push(rbx);
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ __ InitializeSmiConstantRegister();
+ __ InitializeRootRegister();
+ }
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ load_rax(c_entry_fp);
- __ push(rax);
+ Isolate* isolate = masm->isolate();
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
- __ InitializeSmiConstantRegister();
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, isolate);
+ {
+ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
+ __ push(c_entry_fp_operand);
+ }
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ load_rax(js_entry_sp);
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
+ __ Load(rax, js_entry_sp);
__ testq(rax, rax);
__ j(not_zero, &not_outermost_js);
+ __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movq(rax, rbp);
- __ store_rax(js_entry_sp);
+ __ Store(js_entry_sp, rax);
+ Label cont;
+ __ jmp(&cont);
__ bind(&not_outermost_js);
+ __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
#endif
// Call a faked try-block that does the invoke.
@@ -3617,8 +3524,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Caught exception: Store result (exception) in the pending
// exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ store_rax(pending_exception);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ isolate);
+ __ Store(pending_exception, rax);
__ movq(rax, Failure::Exception(), RelocInfo::NONE);
__ jmp(&exit);
@@ -3627,8 +3535,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
// Clear any pending exceptions.
- __ load_rax(ExternalReference::the_hole_value_location());
- __ store_rax(pending_exception);
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ Store(pending_exception, rax);
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
@@ -3639,35 +3547,34 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// directly in the code, because the builtin stubs may not have been
// generated yet at the time this code is generated.
if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ load_rax(construct_entry);
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ isolate);
+ __ Load(rax, construct_entry);
} else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ load_rax(entry);
+ ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
+ __ Load(rax, entry);
}
__ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- __ pop(Operand(kScratchRegister, 0));
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ __ PopTryHandler();
+ __ bind(&exit);
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current RBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
+ // Check if the current stack frame is marked as the outermost JS frame.
+ __ pop(rbx);
+ __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
+ __ movq(kScratchRegister, js_entry_sp);
__ movq(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
// Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
- __ pop(Operand(kScratchRegister, 0));
+ { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
+ __ pop(c_entry_fp_operand);
+ }
// Restore callee-saved registers (X64 conventions).
__ pop(rbx);
@@ -3690,88 +3597,159 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void InstanceofStub::Generate(MacroAssembler* masm) {
// Implements "value instanceof function" operator.
- // Expected input state:
+ // Expected input state with no inline cache:
// rsp[0] : return address
// rsp[1] : function pointer
// rsp[2] : value
+ // Expected input state with an inline one-element cache:
+ // rsp[0] : return address
+ // rsp[1] : offset from return address to location of inline cache
+ // rsp[2] : function pointer
+ // rsp[3] : value
// Returns a bitwise zero to indicate that the value
// is an instance of the function and anything else to
// indicate that the value is not an instance.
- // None of the flags are supported on X64.
- ASSERT(flags_ == kNoFlags);
+ static const int kOffsetToMapCheckValue = 2;
+ static const int kOffsetToResultValue = 18;
+ // The last 4 bytes of the instruction sequence
+ // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
+ // Move(kScratchRegister, FACTORY->the_hole_value())
+ // in front of the hole value address.
+ static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
+ // The last 4 bytes of the instruction sequence
+ // __ j(not_equal, &cache_miss);
+ // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
+ // before the offset of the hole value in the root array.
+ static const unsigned int kWordBeforeResultValue = 0x458B4909;
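+ // The FLAG_debug_code paths below compare the word preceding each patch
+ // site against these constants, so a change in the generated inline-check
+ // sequence is caught in debug builds.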
+ // Only the inline check flag is supported on X64.
+ ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
+ int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
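+ // With an inline check the caller pushes one extra value: the offset from
+ // the return address to the patch site (see the expected input state above).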
// Get the object - go slow case if it's a smi.
Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
+
+ __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
__ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
// rdx is function, rax is map.
- // Look up the function and the map in the instanceof cache.
- NearLabel miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss, Label::kNear);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+ __ bind(&miss);
+ }
- __ bind(&miss);
__ TryGetFunctionPrototype(rdx, rbx, &slow);
// Check that the function prototype is a JS object.
__ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
__ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
__ j(above, &slow);
// Register mapping:
// rax is object map.
// rdx is function.
// rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
+ if (FLAG_debug_code) {
+ __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
+ __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
+ }
+ }
__ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
- NearLabel loop, is_instance, is_not_instance;
+ Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
+ __ j(equal, &is_instance, Label::kNear);
__ cmpq(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
+ __ j(equal, &is_not_instance, Label::kNear);
__ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ if (!HasCallSiteInlineCheck()) {
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Store offset of true in the root array at the inline check site.
+ ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+ == 0xB0 - 0x100);
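+ // (0xB0 read as a signed byte is 0xB0 - 0x100 = -0x50, i.e. a displacement
+ // of -10 * kPointerSize from the root register, per the assertion above.)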
+ __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize.
+ __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
+ if (FLAG_debug_code) {
+ __ movl(rax, Immediate(kWordBeforeResultValue));
+ __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
+ }
+ __ Set(rax, 0);
+ }
+ __ ret(2 * kPointerSize + extra_stack_space);
__ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
+ if (!HasCallSiteInlineCheck()) {
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Store offset of false in the root array at the inline check site.
+ ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+ == 0xB8 - 0x100);
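+ // (Analogously, 0xB8 as a signed byte is -0x48 = -9 * kPointerSize.)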
+ __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize.
+ __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
+ if (FLAG_debug_code) {
+ __ movl(rax, Immediate(kWordBeforeResultValue));
+ __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ }
+ }
+ __ ret(2 * kPointerSize + extra_stack_space);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
+ if (HasCallSiteInlineCheck()) {
+ // Remove extra value from the stack.
+ __ pop(rcx);
+ __ pop(rax);
+ __ push(rcx);
+ }
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
@@ -3805,7 +3783,8 @@ const char* CompareStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
@@ -3936,10 +3915,14 @@ void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
__ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ Factory* factory = masm->isolate()->factory();
// Index is not a smi.
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ __ CheckMap(index_,
+ factory->heap_number_map(),
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
@@ -4048,15 +4031,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
+ __ JumpIfSmi(rax, &string_add_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &string_add_runtime);
// First argument is a string, test second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
+ __ JumpIfSmi(rdx, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
} else {
@@ -4079,20 +4059,21 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rax: first string
// rdx: second string
// Check if either of the strings are empty. In that case return the other.
- NearLabel second_not_zero_length, both_not_zero_length;
+ Label second_not_zero_length, both_not_zero_length;
__ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
__ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length);
+ __ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty, result is first string which is already in rax.
- __ IncrementCounter(&Counters::string_add_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ movq(rbx, FieldOperand(rax, String::kLengthOffset));
__ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length);
+ __ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty, result is second string which is in rdx.
__ movq(rax, rdx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Both strings are non-empty.
@@ -4118,8 +4099,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
__ SmiAdd(rbx, rbx, rcx);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
+ // Use the symbol table when adding two one character strings, as it
+ // helps later optimizations to return a symbol here.
__ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
@@ -4135,8 +4116,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
@@ -4176,7 +4157,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
__ movq(rax, rcx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
@@ -4250,7 +4231,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of second argument
StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
__ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Handle creating a flat two byte result.
@@ -4287,7 +4268,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of second argument
StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
__ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
+ __ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
@@ -4383,9 +4364,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
ASSERT(count.is(rcx)); // rep movs count
// Nothing to do for zero characters.
- NearLabel done;
+ Label done;
__ testl(count, count);
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
// Make count the number of bytes to copy.
if (!ascii) {
@@ -4394,9 +4375,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
// Don't enter the rep movs if there are fewer than 8 bytes to copy.
- NearLabel last_bytes;
+ Label last_bytes;
__ testl(count, Immediate(~7));
- __ j(zero, &last_bytes);
+ __ j(zero, &last_bytes, Label::kNear);
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
@@ -4410,7 +4391,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Check if there are more bytes to copy.
__ bind(&last_bytes);
__ testl(count, count);
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
// Copy remaining characters.
Label loop;
@@ -4438,10 +4419,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
- NearLabel not_array_index;
+ Label not_array_index;
__ leal(scratch, Operand(c1, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
+ __ j(above, &not_array_index, Label::kNear);
__ leal(scratch, Operand(c2, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_found);
@@ -4471,15 +4452,14 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ decl(mask);
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+ Register map = scratch4;
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string (32-bit int)
// symbol_table: symbol table
// mask: capacity mask (32-bit int)
- // undefined: undefined value
+ // map: -
// scratch: -
// Perform a number of probes in the symbol table.
@@ -4494,7 +4474,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
}
__ andl(scratch, mask);
- // Load the entry from the symble table.
+ // Load the entry from the symbol table.
Register candidate = scratch; // Scratch register contains candidate.
STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ movq(candidate,
@@ -4504,8 +4484,16 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
SymbolTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
- __ cmpq(candidate, undefined);
+ Label is_string;
+ __ CmpObjectType(candidate, ODDBALL_TYPE, map);
+ __ j(not_equal, &is_string, Label::kNear);
+
+ __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
__ j(equal, not_found);
+ // Must be null (deleted entry).
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
// If length is not 2 the string is not a candidate.
__ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
@@ -4517,8 +4505,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register temp = kScratchRegister;
// Check that the candidate is a non-external ascii string.
- __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(
temp, temp, &next_probe[i]);
@@ -4591,7 +4578,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// if (hash == 0) hash = 27;
Label hash_not_zero;
__ j(not_zero, &hash_not_zero);
- __ movl(hash, Immediate(27));
+ __ Set(hash, 27);
__ bind(&hash_not_zero);
}
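
For reference, the tail of the hash computation shown here only normalizes a zero result; in C++ terms the step is simply (the constant 27 comes from the code above, its particular choice is not explained by the patch):

// Final step of the string hash shown above: a zero hash is replaced by 27.
unsigned FinalizeHash(unsigned hash) {
  return (hash == 0) ? 27 : hash;
}
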
@@ -4696,7 +4683,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
__ bind(&non_ascii_flat);
@@ -4733,7 +4721,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(rsi, rdx); // Restore rsi.
__ bind(&return_rax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
// Just jump to runtime to create the sub string.
@@ -4742,6 +4730,47 @@ void SubStringStub::Generate(MacroAssembler* masm) {
}
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label check_zero_length;
+ __ movq(length, FieldOperand(left, String::kLengthOffset));
+ __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
+ __ j(equal, &check_zero_length, Label::kNear);
+ __ Move(rax, Smi::FromInt(NOT_EQUAL));
+ __ ret(0);
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ SmiTest(length);
+ __ j(not_zero, &compare_chars, Label::kNear);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ // Compare characters.
+ __ bind(&compare_chars);
+ Label strings_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+ &strings_not_equal, Label::kNear);
+
+ // Characters are equal.
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ // Characters are not equal.
+ __ bind(&strings_not_equal);
+ __ Move(rax, Smi::FromInt(NOT_EQUAL));
+ __ ret(0);
+}
+
+
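
The new GenerateFlatAsciiStringEquals helper has a simple three-step shape: unequal lengths decide NOT_EQUAL immediately, a zero length decides EQUAL, and otherwise the character loop decides. Roughly, as a sketch over plain char arrays rather than the stub's tagged values:

// Sketch of the equality check generated above.
bool FlatAsciiStringEquals(const char* left, int left_length,
                           const char* right, int right_length) {
  if (left_length != right_length) return false;  // NOT_EQUAL
  if (left_length == 0) return true;              // EQUAL (zero length)
  for (int i = 0; i < left_length; i++) {
    if (left[i] != right[i]) return false;        // strings_not_equal
  }
  return true;                                    // EQUAL
}
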
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -4761,8 +4790,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
FieldOperand(right, String::kLengthOffset));
// Register scratch4 now holds left.length - right.length.
const Register length_difference = scratch4;
- NearLabel left_shorter;
- __ j(less, &left_shorter);
+ Label left_shorter;
+ __ j(less, &left_shorter, Label::kNear);
// The right string isn't longer than the left one.
// Get the right string's length by subtracting the (non-negative) difference
// from the left string's length.
@@ -4771,54 +4800,30 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Register scratch1 now holds Min(left.length, right.length).
const Register min_length = scratch1;
- NearLabel compare_lengths;
+ Label compare_lengths;
// If min-length is zero, go directly to comparing lengths.
__ SmiTest(min_length);
- __ j(zero, &compare_lengths);
+ __ j(zero, &compare_lengths, Label::kNear);
- __ SmiToInteger32(min_length, min_length);
+ // Compare loop.
+ Label result_not_equal;
+ GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+ &result_not_equal, Label::kNear);
- // Registers scratch2 and scratch3 are free.
- NearLabel result_not_equal;
- Label loop;
- {
- // Check characters 0 .. min_length - 1 in a loop.
- // Use scratch3 as loop index, min_length as limit and scratch2
- // for computation.
- const Register index = scratch3;
- __ movl(index, Immediate(0)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- // TODO(lrn): Could we load more than one character at a time?
- __ movb(scratch2, FieldOperand(left,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- // Increment index and use -1 modifier on next load to give
- // the previous load extra time to complete.
- __ addl(index, Immediate(1));
- __ cmpb(scratch2, FieldOperand(right,
- index,
- times_1,
- SeqAsciiString::kHeaderSize - 1));
- __ j(not_equal, &result_not_equal);
- __ cmpl(index, min_length);
- __ j(not_equal, &loop);
- }
// Completed loop without finding different characters.
// Compare lengths (precomputed).
__ bind(&compare_lengths);
__ SmiTest(length_difference);
- __ j(not_zero, &result_not_equal);
+ __ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
__ ret(0);
- NearLabel result_greater;
+ Label result_greater;
__ bind(&result_not_equal);
// Unequal comparison of left to right, either character or length.
- __ j(greater, &result_greater);
+ __ j(greater, &result_greater, Label::kNear);
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
@@ -4831,6 +4836,36 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
}
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance near_jump) {
+ // Change index to run from -length to -1 by adding length to the string
+ // start. This means that the loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiToInteger32(length, length);
+ __ lea(left,
+ FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ __ neg(length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ movb(scratch, Operand(left, index, times_1, 0));
+ __ cmpb(scratch, Operand(right, index, times_1, 0));
+ __ j(not_equal, chars_not_equal, near_jump);
+ __ addq(index, Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
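
GenerateAsciiCharsCompareLoop replaces the old explicit index/limit pair with an index that runs from -length up to zero, so the increment's zero flag doubles as the loop test. A minimal C-style sketch of the same idea (illustrative, not the generated code):

// Both pointers are advanced past the characters being compared; the index
// then runs from -length to 0, so "index != 0" is the only loop condition.
bool AsciiCharsEqual(const char* left, const char* right, int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}
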
void StringCompareStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -4843,11 +4878,12 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
// Check for identity.
- NearLabel not_same;
+ Label not_same;
__ cmpq(rdx, rax);
- __ j(not_equal, &not_same);
+ __ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_compare_native(), 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
@@ -4856,7 +4892,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ pop(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
@@ -4870,72 +4906,18 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
-void StringCharAtStub::Generate(MacroAssembler* masm) {
- // Expects two arguments (object, index) on the stack:
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: index
- // rsp[16]: object
-
- Register object = rbx;
- Register index = rax;
- Register scratch1 = rcx;
- Register scratch2 = rdx;
- Register result = rax;
-
- __ pop(scratch1); // Return address.
- __ pop(index);
- __ pop(object);
- __ push(scratch1);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Move(result, Factory::empty_string());
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Smi::FromInt(0));
- __ jmp(&done);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm, call_helper);
-
- __ bind(&done);
- __ ret(0);
-}
-
-
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
- NearLabel miss;
- __ JumpIfNotBothSmi(rdx, rax, &miss);
+ Label miss;
+ __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
__ subq(rax, rdx);
} else {
- NearLabel done;
+ Label done;
__ subq(rdx, rax);
- __ j(no_overflow, &done);
+ __ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ SmiNot(rdx, rdx);
__ bind(&done);
@@ -4951,16 +4933,16 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
- NearLabel generic_stub;
- NearLabel unordered;
- NearLabel miss;
+ Label generic_stub;
+ Label unordered;
+ Label miss;
Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub);
+ __ j(either_smi, &generic_stub, Label::kNear);
__ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
+ __ j(not_equal, &miss, Label::kNear);
// Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -4970,7 +4952,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
+ __ j(parity_even, &unordered, Label::kNear);
// Return a result of -1, 0, or 1, based on EFLAGS.
// Performing mov, because xor would destroy the flag register.
@@ -4991,16 +4973,133 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+ Register tmp1 = rcx;
+ Register tmp2 = rbx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+ __ j(cond, &miss, Label::kNear);
+
+ // Check that both operands are symbols.
+ __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &miss, Label::kNear);
+
+ // Symbols are compared by identity.
+ Label done;
+ __ cmpq(left, right);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
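
GenerateSymbols leans on the facts its asserts state: symbols are interned, so identity decides equality, and because EQUAL is the zero smi, any non-zero rax means "not equal"; when the pointers differ the stub simply returns with the right operand (a tagged pointer, never zero) still in rax. A hedged sketch of that result convention, not of the stub itself:

#include <cstdint>

// Returns 0 (modelling Smi::FromInt(EQUAL)) when the operands are the same
// symbol, otherwise reuses the right operand, which is guaranteed non-zero.
std::intptr_t CompareSymbols(std::intptr_t left, std::intptr_t right) {
  if (left == right) return 0;  // EQUAL, encoded as the zero smi
  return right;                 // tagged heap pointer: non-zero => not equal
}
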
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(GetCondition() == equal);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+ Register tmp1 = rcx;
+ Register tmp2 = rbx;
+ Register tmp3 = rdi;
+
+ // Check that both operands are heap objects.
+ Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+ __ j(cond, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movq(tmp3, tmp1);
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ or_(tmp3, tmp2);
+ __ testb(tmp3, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Fast check for identical strings.
+ Label not_same;
+ __ cmpq(left, right);
+ __ j(not_equal, &not_same, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ // Handle not identical strings.
+ __ bind(&not_same);
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ Label do_compare;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp2);
+ __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &do_compare, Label::kNear);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ ret(0);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ bind(&do_compare);
+ __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ pop(tmp1); // Return address.
+ __ push(left);
+ __ push(right);
+ __ push(tmp1);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
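
GenerateStrings layers its fast paths before falling back to Runtime::kStringEquals. The decision ladder, sketched with booleans standing in for the inline type checks (illustrative only, not V8 API):

// Illustrative decision ladder for the STRINGS compare IC stub.
enum Outcome { kEqual, kNotEqual, kRuntime };

Outcome CompareStrings(bool identical,
                       bool both_symbols,
                       bool both_sequential_ascii,
                       bool flat_ascii_equal) {
  if (identical) return kEqual;            // fast identity check
  if (both_symbols) return kNotEqual;      // interned and not identical
  if (both_sequential_ascii) {
    return flat_ascii_equal ? kEqual : kNotEqual;
  }
  return kRuntime;                         // cons/external/two-byte cases
}
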
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
- NearLabel miss;
+ Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss);
+ __ j(either_smi, &miss, Label::kNear);
__ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ subq(rax, rdx);
@@ -5019,7 +5118,8 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(rcx);
// Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
__ EnterInternalFrame();
__ push(rdx);
__ push(rax);
@@ -5041,144 +5141,206 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
}
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register untagged_key,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // elements - is set to the the receiver's element if
- // the receiver doesn't have a pixel array or the
- // key is not a smi, otherwise it's the elements'
- // external pointer.
- // untagged_key - is set to the untagged key
-
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit check
- // to ensure the key is a smi must be added.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
- }
- __ SmiToInteger32(untagged_key, key);
-
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Factory::pixel_array_map());
- __ Assert(equal, "Elements isn't a pixel array");
- }
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ String* name,
+ Register r0) {
+ // If the slots probed for the hash value in the range 1 to kProbes - 1 do
+ // not hold the name and the kProbes-th slot is unused (its name is the
+ // undefined value), the hash table is guaranteed not to contain the
+ // property. This holds even if some slots hold deleted properties
+ // (their names are the null value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = r0;
+ // Capacity is smi 2^n.
+ __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+ __ decl(index);
+ __ and_(index,
+ Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+
+ Register entity_name = r0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ movq(entity_name, Operand(properties,
+ index,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
+ __ j(equal, done);
+
+ // Stop if found the property.
+ __ Cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss);
+
+ // Check if the entry name is not a symbol.
+ __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+ __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, miss);
}
- // Check that the smi is in range.
- __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
- __ j(above_equal, out_of_range); // unsigned check handles negative keys.
-
- // Load and tag the element as a smi.
- __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
- __ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
- __ Integer32ToSmi(result, result);
- __ ret(0);
+ StringDictionaryLookupStub stub(properties,
+ r0,
+ r0,
+ StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+ __ Push(Handle<Object>(name));
+ __ push(Immediate(name->Hash()));
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ testq(r0, r0);
+ __ j(not_zero, miss);
+ __ jmp(done);
+ return result;
}
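
Each inlined probe above computes a masked index from the name's hash plus StringDictionary::GetProbeOffset(i), then scales it by the entry size of 3 to reach the element slot. A small sketch of just that index computation (the helper name is hypothetical):

// Hypothetical helper mirroring the masked-index computation used by the
// inlined probes; capacity is a power of two, kEntrySize is 3 as asserted.
int ProbeElementIndex(unsigned hash, int probe_offset, int capacity) {
  int mask = capacity - 1;                   // __ decl(index) after capacity load
  int index = (hash + probe_offset) & mask;  // (hash + GetProbeOffset(i)) & mask
  return index * 3;                          // lea(index, [index + index*2])
}
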
-// Stores an indexed element into a pixel array, clamping the stored value.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register scratch1,
- bool load_elements_from_receiver,
- bool key_is_untagged,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key (must be a smi) and is unchanged.
- // value - holds the value (must be a smi) and is unchanged.
- // elements - holds the element object of the receiver on entry if
- // load_elements_from_receiver is false, otherwise used
- // internally to store the pixel arrays elements and
- // external array pointer.
- //
- Register external_pointer = elements;
- Register untagged_key = scratch1;
- Register untagged_value = receiver; // Only set once success guaranteed.
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
+ __ decl(r0);
+
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r1, Immediate(String::kHashShift));
+ if (i > 0) {
+ __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r1, r0);
- // Fetch the receiver's elements if the caller hasn't already done so.
- if (load_elements_from_receiver) {
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- }
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
- // By passing NULL as not_pixel_array, callers signal that they have already
- // verified that the receiver has pixel array elements.
- if (not_pixel_array != NULL) {
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
- } else {
- if (FLAG_debug_code) {
- // Map check should have already made sure that elements is a pixel array.
- __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Factory::pixel_array_map());
- __ Assert(equal, "Elements isn't a pixel array");
- }
+ // Check if the key is identical to the name.
+ __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ j(equal, done);
}
- // Key must be a smi and it must be in range.
- if (key_is_untagged) {
- untagged_key = key;
- } else {
- // Some callers already have verified that the key is a smi. key_not_smi is
- // set to NULL as a sentinel for that case. Otherwise, add an explicit
- // check to ensure the key is a smi.
- if (key_not_smi != NULL) {
- __ JumpIfNotSmi(key, key_not_smi);
- } else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(key);
- }
+ StringDictionaryLookupStub stub(elements,
+ r0,
+ r1,
+ POSITIVE_LOOKUP);
+ __ push(name);
+ __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r0, Immediate(String::kHashShift));
+ __ push(r0);
+ __ CallStub(&stub);
+
+ __ testq(r0, r0);
+ __ j(zero, miss);
+ __ jmp(done);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // Stack frame on entry:
+ // rsp[0 * kPointerSize]: return address.
+ // rsp[1 * kPointerSize]: key's hash.
+ // rsp[2 * kPointerSize]: key.
+ // Registers:
+ // dictionary_: StringDictionary to probe.
+ // result_: used as scratch.
+ // index_: will hold an index of entry if lookup is successful.
+ // might alias with result_.
+ // Returns:
+ // result_ is zero if the lookup failed, non-zero otherwise.
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ Register scratch = result_;
+
+ __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
+ __ decl(scratch);
+ __ push(scratch);
+
+ // If the slots probed for the hash value in the range 1 to kProbes - 1 do
+ // not hold the name and the kProbes-th slot is unused (its name is the
+ // undefined value), the hash table is guaranteed not to contain the
+ // property. This holds even if some slots hold deleted properties
+ // (their names are the null value).
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ if (i > 0) {
+ __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(rsp, 0));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+
+ // Having undefined at this place means the name is not contained.
+ __ movq(scratch, Operand(dictionary_,
+ index_,
+ times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+
+ __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &not_in_dictionary);
+
+ // Stop if found the property.
+ __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __ j(equal, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // If we hit a non-symbol key during a negative lookup,
+ // we have to bail out, as this key might be equal to the
+ // key we are looking for.
+
+ // Check if the entry name is not a symbol.
+ __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, &maybe_in_dictionary);
}
- __ SmiToInteger32(untagged_key, key);
}
- __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
- __ j(above_equal, out_of_range); // unsigned check handles negative keys.
-
- // Value must be a smi.
- __ JumpIfNotSmi(value, value_not_smi);
- __ SmiToInteger32(untagged_value, value);
-
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(untagged_value, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
- __ decb(untagged_value); // 0 if negative, 255 if positive.
- __ bind(&done);
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing a negative lookup, then probing failure should be
+ // treated as a lookup success. For a positive lookup, probing failure
+ // should be treated as a lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ movq(scratch, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
}
- __ movq(external_pointer,
- FieldOperand(elements, PixelArray::kExternalPointerOffset));
- __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
- __ ret(0); // Return value in eax.
+ __ bind(&in_dictionary);
+ __ movq(scratch, Immediate(1));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_in_dictionary);
+ __ movq(scratch, Immediate(0));
+ __ Drop(1);
+ __ ret(2 * kPointerSize);
}
+
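
The stub's three exits boil down to a small result convention: 1 for a hit, 0 for a definite miss, with the undecided maybe_in_dictionary case resolving to 0 for positive lookups but 1 for negative ones, since an undecided negative lookup must be treated conservatively as a possible hit. A compact sketch of that convention (illustrative, not the stub):

// Models the value left in result_ by the three exit labels above.
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

int LookupResult(bool found, bool undecided, LookupMode mode) {
  if (found) return 1;                         // in_dictionary
  if (undecided) {                             // maybe_in_dictionary
    return (mode == POSITIVE_LOOKUP) ? 0 : 1;  // conservative for negative mode
  }
  return 0;                                    // not_in_dictionary
}
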
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index feb4de8187..a7ed91c504 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -59,78 +59,30 @@ class TranscendentalCacheStub: public CodeStub {
};
-class ToBooleanStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type = TypeInfo::Unknown())
+ UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
: op_(op),
mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
+ operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ UnaryOpStub(
+ int key,
+ UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
+ operand_type_(operand_type),
name_(NULL) {
}
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- bool ArgsInRegistersSupported() {
- return (op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV);
- }
-
private:
Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
+ UnaryOverwriteMode mode_;
// Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
+ UnaryOpIC::TypeInfo operand_type_;
char* name_;
@@ -138,93 +90,82 @@ class GenericBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ PrintF("UnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
+ UnaryOpIC::GetName(operand_type_));
}
#endif
- // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
- class ArgsReversedBits: public BitField<bool, 10, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
- class StaticTypeInfoBits: public BitField<int, 12, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
- Major MajorKey() { return GenericBinaryOp; }
+ Major MajorKey() { return UnaryOp; }
int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
}
+ // Note: A lot of the helper functions below will vanish when we use virtual
+ // functions instead of switch statements more often.
void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
+ void GenerateTypeTransition(MacroAssembler* masm);
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow,
+ Label::Distance non_smi_near = Label::kFar,
+ Label::Distance slow_near = Label::kFar);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi,
+ Label::Distance non_smi_near);
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
+ return UnaryOpIC::ToState(operand_type_);
}
- friend class CodeGenerator;
- friend class LCodeGen;
+ virtual void FinishCode(Code* code) {
+ code->set_unary_op_type(operand_type_);
+ }
};
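
UnaryOpStub's minor key packs its three fields with BitField: the overwrite mode in bit 0, the token in bits 1-7, and the operand type info in bits 8-10. A self-contained sketch of the same round-trip with plain shifts and masks (field widths taken from the declarations above; this is not the BitField implementation):

// Illustrative encode/decode of the UnaryOpStub minor key layout:
// bit 0 = mode, bits 1..7 = op, bits 8..10 = operand type info.
int EncodeMinorKey(int mode, int op, int operand_type) {
  return (mode & 0x1) | ((op & 0x7F) << 1) | ((operand_type & 0x7) << 8);
}

int DecodeMode(int key) { return key & 0x1; }
int DecodeOp(int key) { return (key >> 1) & 0x7F; }
int DecodeOperandType(int key) { return (key >> 8) & 0x7; }
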
-class TypeRecordingBinaryOpStub: public CodeStub {
+class BinaryOpStub: public CodeStub {
public:
- TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
- operands_type_(TRBinaryOpIC::UNINITIALIZED),
- result_type_(TRBinaryOpIC::UNINITIALIZED),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- TypeRecordingBinaryOpStub(
+ BinaryOpStub(
int key,
- TRBinaryOpIC::TypeInfo operands_type,
- TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operands_type_(operands_type),
@@ -241,8 +182,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
OverwriteMode mode_;
// Operand type information determined at runtime.
- TRBinaryOpIC::TypeInfo operands_type_;
- TRBinaryOpIC::TypeInfo result_type_;
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
char* name_;
@@ -250,22 +191,22 @@ class TypeRecordingBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
- PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
- TRBinaryOpIC::GetName(operands_type_));
+ BinaryOpIC::GetName(operands_type_));
}
#endif
// Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 12, 3> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
- Major MajorKey() { return TypeRecordingBinaryOp; }
+ Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
@@ -291,6 +232,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
@@ -298,15 +240,15 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return TRBinaryOpIC::ToState(operands_type_);
+ return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
- code->set_type_recording_binary_op_type(operands_type_);
- code->set_type_recording_binary_op_result_type(result_type_);
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
@@ -415,10 +357,9 @@ class SubStringStub: public CodeStub {
class StringCompareStub: public CodeStub {
public:
- explicit StringCompareStub() {}
+ StringCompareStub() {}
- // Compare two flat ascii strings and returns result in rax after popping two
- // arguments from the stack.
+ // Compares two flat ASCII strings and returns result in rax.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@@ -427,11 +368,27 @@ class StringCompareStub: public CodeStub {
Register scratch3,
Register scratch4);
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
+ // Compares two flat ASCII strings for equality and returns result
+ // in rax.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2);
- void Generate(MacroAssembler* masm);
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch,
+ Label* chars_not_equal,
+ Label::Distance near_jump = Label::kFar);
};
@@ -472,48 +429,73 @@ class NumberToStringStub: public CodeStub {
};
-// Generate code to load an element from a pixel array. The receiver is assumed
-// to not be a smi and to have elements, the caller must guarantee this
-// precondition. If key is not a smi, then the generated code branches to
-// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
-// check has already been performed on key so that the smi check is not
-// generated. If key is not a valid index within the bounds of the pixel array,
-// the generated code jumps to out_of_range. receiver, key and elements are
-// unchanged throughout the generated code sequence.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register untagged_key,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
-
-// Generate code to store an element into a pixel array, clamping values between
-// [0..255]. The receiver is assumed to not be a smi and to have elements, the
-// caller must guarantee this precondition. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated. If the value is not a smi, the
-// generated code will branch to value_not_smi. If the receiver
-// doesn't have pixel array elements, the generated code will branch to
-// not_pixel_array, unless not_pixel_array is NULL, in which case the caller
-// must ensure that the receiver has pixel array elements. If key is not a
-// valid index within the bounds of the pixel array, the generated code jumps to
-// out_of_range.
-void GenerateFastPixelArrayStore(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register elements,
- Register scratch1,
- bool load_elements_from_receiver,
- bool key_is_untagged,
- Label* key_not_smi,
- Label* value_not_smi,
- Label* not_pixel_array,
- Label* out_of_range);
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ StringDictionaryLookupStub(Register dictionary,
+ Register result,
+ Register index,
+ LookupMode mode)
+ : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register properties,
+ String* name,
+ Register r0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return DictionaryBits::encode(dictionary_.code()) |
+ ResultBits::encode(result_.code()) |
+ IndexBits::encode(index_.code()) |
+ LookupModeBits::encode(mode_);
+ }
+
+ class DictionaryBits: public BitField<int, 0, 4> {};
+ class ResultBits: public BitField<int, 4, 4> {};
+ class IndexBits: public BitField<int, 8, 4> {};
+ class LookupModeBits: public BitField<LookupMode, 12, 1> {};
+
+ Register dictionary_;
+ Register result_;
+ Register index_;
+ LookupMode mode_;
+};
+
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/x64/codegen-x64-inl.h
deleted file mode 100644
index 53caf91975..0000000000
--- a/deps/v8/src/x64/codegen-x64-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_X64_CODEGEN_X64_INL_H_
-#define V8_X64_CODEGEN_X64_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODEGEN_X64_INL_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index ad114c2431..f8f2d6e687 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -29,81 +29,14 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
- }
- }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
- }
- }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
- frame_state_.Restore(masm_);
-}
-
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- frame_state_->Restore(masm);
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
@@ -114,8628 +47,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- destination_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- ControlDestination* destination)
- : owner_(owner),
- destination_(destination),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- state_(NULL),
- loop_nesting_(0),
- function_return_is_shadowed_(false),
- in_spilled_code_(false) {
-}
-
-
-// Calling conventions:
-// rbp: caller's frame pointer
-// rsp: stack pointer
-// rdi: called JS function
-// rsi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
- Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- set_in_spilled_code(false);
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(0, loop_nesting_);
- loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
- JumpTarget::set_compiling_deferred_code(false);
-
- {
- CodeGenState state(this);
- // Entry:
- // Stack: receiver, arguments, return address.
- // rbp: caller's frame pointer
- // rsp: stack pointer
- // rdi: called JS function
- // rsi: callee's context
- allocator_->Initialize();
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- frame_->SpillAll();
- __ int3();
- }
-#endif
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->AsSlot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
-
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
- }
-
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- frame_->CallRuntime(Runtime::kTraceEnter, 0);
- // Ignore the return value.
- }
- CheckStack();
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- frame_->CallRuntime(Runtime::kDebugTrace, 0);
- // Ignore the return value.
- }
-#endif
- VisitStatements(info->function()->body());
-
- // Handle the return from the function.
- if (has_valid_frame()) {
- // If there is a valid frame, control flow can fall off the end of
- // the body. In that case there is an implicit return statement.
- ASSERT(!function_return_is_shadowed_);
- CodeForReturnPosition(info->function());
- frame_->PrepareForReturn();
- Result undefined(Factory::undefined_value());
- if (function_return_.is_bound()) {
- function_return_.Jump(&undefined);
- } else {
- function_return_.Bind(&undefined);
- GenerateReturnSequence(&undefined);
- }
- } else if (function_return_.is_linked()) {
- // If the return target has dangling jumps to it, then we have not
- // yet generated the return sequence. This can happen when (a)
- // control does not flow off the end of the body so we did not
- // compile an artificial return statement just above, and (b) there
- // are return statements in the body but (c) they are all shadowed.
- Result return_value;
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
- }
-
- // Adjust for function-level loop nesting.
- ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
- loop_nesting_ = 0;
-
- // Code generation state must be reset.
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- JumpTarget::set_compiling_deferred_code(true);
- ProcessDeferred();
- JumpTarget::set_compiling_deferred_code(false);
- }
-
- // There is no need to delete the register allocator, it is a
- // stack-allocated local.
- allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return frame_->ParameterAt(index);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(rsi)); // do not overwrite context register
- Register context = rsi;
- int chain_length = scope()->ContextChainLength(slot->var()->scope());
- for (int i = 0; i < chain_length; i++) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(rsp, 0);
- }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow) {
- ASSERT(slot->type() == Slot::CONTEXT);
- ASSERT(tmp.is_register());
- Register context = rsi;
-
- for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal, not_taken);
- __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
- ControlDestination* dest,
- bool force_control) {
- ASSERT(!in_spilled_code());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, dest);
- Visit(expr);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- !dest->is_used() &&
- frame_->height() == original_height) {
- dest->Goto(true);
- }
- }
-
- if (force_control && !dest->is_used()) {
- // Convert the TOS value into flow to the control destination.
- ToBoolean(dest);
- }
-
- ASSERT(!(force_control && !dest->is_used()));
- ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Load(expression);
- frame_->SpillAll();
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- JumpTarget true_target;
- JumpTarget false_target;
- ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(expr, &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The false target was just bound.
- JumpTarget loaded;
- frame_->Push(Factory::false_value());
- // There may be dangling jumps to the true target.
- if (true_target.is_linked()) {
- loaded.Jump();
- true_target.Bind();
- frame_->Push(Factory::true_value());
- loaded.Bind();
- }
-
- } else if (dest.is_used()) {
- // There is true, and possibly false, control flow (with true as
- // the fall through).
- JumpTarget loaded;
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- false_target.Bind();
- frame_->Push(Factory::false_value());
- loaded.Bind();
- }
-
- } else {
- // We have a valid value on top of the frame, but we still may
- // have dangling jumps to the true and false targets from nested
- // subexpressions (eg, the left subexpressions of the
- // short-circuited boolean operators).
- ASSERT(has_valid_frame());
- if (true_target.is_linked() || false_target.is_linked()) {
- JumpTarget loaded;
- loaded.Jump(); // Don't lose the current TOS.
- if (true_target.is_linked()) {
- true_target.Bind();
- frame_->Push(Factory::true_value());
- if (false_target.is_linked()) {
- loaded.Jump();
- }
- }
- if (false_target.is_linked()) {
- false_target.Bind();
- frame_->Push(Factory::false_value());
- }
- loaded.Bind();
- }
- }
-
- ASSERT(has_valid_frame());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- if (in_spilled_code()) {
- frame_->EmitPush(GlobalObjectOperand());
- } else {
- Result temp = allocator_->Allocate();
- __ movq(temp.reg(), GlobalObjectOperand());
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
- Result temp = allocator_->Allocate();
- Register reg = temp.reg();
- __ movq(reg, GlobalObjectOperand());
- __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
- frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
- // Special handling of identifiers as subexpressions of typeof.
- Variable* variable = expr->AsVariableProxy()->AsVariable();
- if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // For a global variable we build the property reference
- // <global>.<variable> and perform a (regular non-contextual) property
- // load to make sure we do not get reference errors.
- Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
- Literal key(variable->name());
- Property property(&global, &key, RelocInfo::kNoPosition);
- Reference ref(this, &property);
- ref.GetValue();
- } else if (variable != NULL && variable->AsSlot() != NULL) {
- // For a variable that rewrites to a slot, we signal it is the immediate
- // subexpression of a typeof.
- LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
- } else {
- // Anything else can be handled normally.
- Load(expr);
- }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
- ASSERT(scope()->arguments_shadow() != NULL);
- // We don't want to do lazy arguments allocation for functions that
- // have heap-allocated contexts, because it interferes with the
- // uninitialized const tracking in the context objects.
- return (scope()->num_heap_slots() > 0)
- ? EAGER_ARGUMENTS_ALLOCATION
- : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
- ArgumentsAllocationMode mode = ArgumentsMode();
- ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
- Comment cmnt(masm_, "[ store arguments object");
- if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
- // When using lazy arguments allocation, we store the arguments marker value
- // as a sentinel indicating that the arguments object hasn't been
- // allocated yet.
- frame_->Push(Factory::arguments_marker());
- } else {
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- frame_->PushFunction();
- frame_->PushReceiverSlotAddress();
- frame_->Push(Smi::FromInt(scope()->num_parameters()));
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
- }
-
- Variable* arguments = scope()->arguments();
- Variable* shadow = scope()->arguments_shadow();
- ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
- ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if a function
- // has a local variable named 'arguments'.
- LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
- Result probe = frame_->Pop();
- if (probe.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !probe.handle()->IsArgumentsMarker();
- } else {
- __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
- probe.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
- return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- // References are loaded from both spilled and unspilled code. Set the
- // state to unspilled to allow that (and explicitly spill after
- // construction at the construction sites).
- bool was_in_spilled_code = in_spilled_code_;
- in_spilled_code_ = false;
-
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- // The expression is either a property or a variable proxy that rewrites
- // to a property.
- Load(property->obj());
- if (property->key()->IsPropertyName()) {
- ref->set_type(Reference::NAMED);
- } else {
- Load(property->key());
- ref->set_type(Reference::KEYED);
- }
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- // If rax is free, the register allocator prefers it. Thus the code
- // generator will load the global object into rax, which is where
- // LoadIC wants it. Most uses of Reference call LoadIC directly
- // after the reference is created.
- frame_->Spill(rax);
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->AsSlot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- // Anything else is a runtime error.
- Load(e);
- frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
- }
-
- in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- frame_->Nip(ref->size());
- ref->set_unloaded();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
- Comment cmnt(masm_, "[ ToBoolean");
-
- // The value to convert should be popped from the frame.
- Result value = frame_->Pop();
- value.ToRegister();
-
- if (value.is_number()) {
- // Fast case if TypeInfo indicates only numbers.
- if (FLAG_debug_code) {
- __ AbortIfNotNumber(value.reg());
- }
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- if (value.is_smi()) {
- value.Unuse();
- dest->Split(not_zero);
- } else {
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
- value.Unuse();
- dest->Split(not_zero);
- }
- } else {
- // Fast case checks.
- // 'false' => false.
- __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // 'true' => true.
- __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
- dest->true_target()->Branch(equal);
-
- // 'undefined' => false.
- __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
- dest->false_target()->Branch(equal);
-
- // Smi => false iff zero.
- __ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
-
- // Call the stub for all other cases.
- frame_->Push(&value); // Undo the Pop() from above.
- ToBooleanStub stub;
- Result temp = frame_->CallStub(&stub, 1);
- // Convert the result to a condition code.
- __ testq(temp.reg(), temp.reg());
- temp.Unuse();
- dest->Split(not_equal);
- }
-}
-
-
-// Call the specialized stub for a binary operation.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
- DeferredInlineBinaryOperation(Token::Value op,
- Register dst,
- Register left,
- Register right,
- OverwriteMode mode)
- : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
- set_comment("[ DeferredInlineBinaryOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register left_;
- Register right_;
- OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
- Label done;
- if ((op_ == Token::ADD)
- || (op_ == Token::SUB)
- || (op_ == Token::MUL)
- || (op_ == Token::DIV)) {
- Label call_runtime;
- Label left_smi, right_smi, load_right, do_op;
- __ JumpIfSmi(left_, &left_smi);
- __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_LEFT) {
- __ movq(dst_, left_);
- }
- __ jmp(&load_right);
-
- __ bind(&left_smi);
- __ SmiToInteger32(left_, left_);
- __ cvtlsi2sd(xmm0, left_);
- __ Integer32ToSmi(left_, left_);
- if (mode_ == OVERWRITE_LEFT) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&load_right);
- __ JumpIfSmi(right_, &right_smi);
- __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
- if (mode_ == OVERWRITE_RIGHT) {
- __ movq(dst_, right_);
- } else if (mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
- __ jmp(&do_op);
-
- __ bind(&right_smi);
- __ SmiToInteger32(right_, right_);
- __ cvtlsi2sd(xmm1, right_);
- __ Integer32ToSmi(right_, right_);
- if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
- Label alloc_failure;
- __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
- }
-
- __ bind(&do_op);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
- __ jmp(&done);
-
- __ bind(&call_runtime);
- }
- GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, left_, right_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ bind(&done);
-}
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
- Token::Value op,
- const Result& right,
- const Result& left) {
- // Set TypeInfo of result according to the operation performed.
- // We rely on the fact that smis have a 32 bit payload on x64.
- STATIC_ASSERT(kSmiValueSize == 32);
- switch (op) {
- case Token::COMMA:
- return right.type_info();
- case Token::OR:
- case Token::AND:
- // Result type can be either of the two input types.
- return operands_type;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SAR:
- case Token::SHL:
- // Result is always a smi.
- return TypeInfo::Smi();
- case Token::SHR:
- // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
- return (right.is_constant() && right.handle()->IsSmi()
- && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
- ? TypeInfo::Smi()
- : TypeInfo::Number();
- case Token::ADD:
- if (operands_type.IsNumber()) {
- return TypeInfo::Number();
- } else if (left.type_info().IsString() || right.type_info().IsString()) {
- return TypeInfo::String();
- } else {
- return TypeInfo::Unknown();
- }
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- // Result is always a number.
- return TypeInfo::Number();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
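A note on the SHR rule above: x >>> y yields a smi whenever the masked shift count is at least one, because a logical right shift by one or more of any 32-bit value is at most 0x7FFFFFFF, which fits the 32-bit smi payload the STATIC_ASSERT relies on. A minimal standalone check of that bound, with illustrative values (this sketch is not part of the V8 sources):

#include <cassert>
#include <cstdint>

int main() {
  // Worst-case input and the smallest effective shift after masking with 0x1F.
  uint32_t x = 0xFFFFFFFFu;
  int shift = 33 & 0x1F;                     // Masked shift count is 1.
  assert((x >> shift) <= 0x7FFFFFFFu);       // Always fits a 32-bit smi payload.
  assert((x >> (32 & 0x1F)) > 0x7FFFFFFFu);  // A masked count of 0 need not.
  return 0;
}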
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Comment cmnt_token(masm_, Token::String(op));
-
- if (op == Token::COMMA) {
- // Simply discard left value.
- frame_->Nip(1);
- return;
- }
-
- Result right = frame_->Pop();
- Result left = frame_->Pop();
-
- if (op == Token::ADD) {
- const bool left_is_string = left.type_info().IsString();
- const bool right_is_string = right.type_info().IsString();
- // Make sure constant strings have string type info.
- ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
- left_is_string);
- ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
- right_is_string);
- if (left_is_string || right_is_string) {
- frame_->Push(&left);
- frame_->Push(&right);
- Result answer;
- if (left_is_string) {
- if (right_is_string) {
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- answer = frame_->CallStub(&stub, 2);
- } else {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
- }
- } else if (right_is_string) {
- answer =
- frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
- }
- answer.set_type_info(TypeInfo::String());
- frame_->Push(&answer);
- return;
- }
- // Neither operand is known to be a string.
- }
-
- bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
- bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
- bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
- bool right_is_non_smi_constant =
- right.is_constant() && !right.handle()->IsSmi();
-
- if (left_is_smi_constant && right_is_smi_constant) {
- // Compute the constant result at compile time, and leave it on the frame.
- int left_int = Smi::cast(*left.handle())->value();
- int right_int = Smi::cast(*right.handle())->value();
- if (FoldConstantSmis(op, left_int, right_int)) return;
- }
-
- // Get number type of left and right sub-expressions.
- TypeInfo operands_type =
- TypeInfo::Combine(left.type_info(), right.type_info());
-
- TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
- Result answer;
- if (left_is_non_smi_constant || right_is_non_smi_constant) {
- // Go straight to the slow case, with no smi code.
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_SMI_CODE_IN_STUB,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- } else if (right_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
- false, overwrite_mode);
- } else if (left_is_smi_constant) {
- answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
- true, overwrite_mode);
- } else {
- // Set the flags based on the operation, type and loop nesting level.
- // Bit operations are always assumed to be likely to operate on smis, but
- // the inline smi check code is still only generated when the operation is
- // part of a loop. For all other operations the inline smi check code is
- // only generated for likely smis when the operation is part of a loop.
- if (loop_nesting() > 0 &&
- (Token::IsBitOp(op) ||
- operands_type.IsInteger32() ||
- expr->type()->IsLikelySmi())) {
- answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
- } else {
- GenericBinaryOpStub stub(op,
- overwrite_mode,
- NO_GENERIC_BINARY_FLAGS,
- operands_type);
- answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
- }
- }
-
- answer.set_type_info(result_type);
- frame_->Push(&answer);
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- Object* answer_object = Heap::undefined_value();
- switch (op) {
- case Token::ADD:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
- answer_object = Smi::FromInt(left + right);
- }
- break;
- case Token::SUB:
- // Use intptr_t to detect overflow of 32-bit int.
- if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
- answer_object = Smi::FromInt(left - right);
- }
- break;
- case Token::MUL: {
- double answer = static_cast<double>(left) * right;
- if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
- // If the product is zero and the non-zero factor is negative,
- // the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
- answer_object = Smi::FromInt(static_cast<int>(answer));
- }
- }
- }
- break;
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- answer_object = Smi::FromInt(left | right);
- break;
- case Token::BIT_AND:
- answer_object = Smi::FromInt(left & right);
- break;
- case Token::BIT_XOR:
- answer_object = Smi::FromInt(left ^ right);
- break;
-
- case Token::SHL: {
- int shift_amount = right & 0x1F;
- if (Smi::IsValid(left << shift_amount)) {
- answer_object = Smi::FromInt(left << shift_amount);
- }
- break;
- }
- case Token::SHR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- unsigned_left >>= shift_amount;
- if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
- answer_object = Smi::FromInt(unsigned_left);
- }
- break;
- }
- case Token::SAR: {
- int shift_amount = right & 0x1F;
- unsigned int unsigned_left = left;
- if (left < 0) {
- // Perform an arithmetic shift of a negative number by complementing
- // the number, shifting logically, and complementing again.
- unsigned_left = ~unsigned_left;
- unsigned_left >>= shift_amount;
- unsigned_left = ~unsigned_left;
- } else {
- unsigned_left >>= shift_amount;
- }
- ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
- answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (answer_object == Heap::undefined_value()) {
- return false;
- }
- frame_->Push(Handle<Object>(answer_object));
- return true;
-}
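The SAR case in FoldConstantSmis above folds an arithmetic right shift of a negative constant as complement, logical shift, complement, which avoids shifting a negative signed value directly. A small standalone sketch of that identity on two's-complement 32-bit values (the helper name and test values are illustrative, not from V8):

#include <cassert>
#include <cstdint>

// Arithmetic right shift computed the way the SAR case folds it:
// complement a negative value, shift logically, then complement again.
static int32_t SarViaComplement(int32_t left, int shift_amount) {
  uint32_t bits = static_cast<uint32_t>(left);
  if (left < 0) {
    bits = ~bits;
    bits >>= shift_amount;
    bits = ~bits;
  } else {
    bits >>= shift_amount;
  }
  return static_cast<int32_t>(bits);
}

int main() {
  assert(SarViaComplement(-20, 2) == -5);  // Rounds toward negative infinity.
  assert(SarViaComplement(20, 2) == 5);
  assert(SarViaComplement(-1, 31) == -1);  // The sign is preserved.
  return 0;
}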
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi) {
- TypeInfo left_info = left->type_info();
- TypeInfo right_info = right->type_info();
- if (left_info.IsDouble() || left_info.IsString() ||
- right_info.IsDouble() || right_info.IsString()) {
- // We know that left and right are not both smi. Don't do any tests.
- return;
- }
-
- if (left->reg().is(right->reg())) {
- if (!left_info.IsSmi()) {
- Condition is_smi = masm()->CheckSmi(left->reg());
- both_smi->Branch(is_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- } else if (!left_info.IsSmi()) {
- if (!right_info.IsSmi()) {
- Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
- both_smi->Branch(is_smi);
- } else {
- Condition is_smi = masm()->CheckSmi(left->reg());
- both_smi->Branch(is_smi);
- }
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
- if (!right_info.IsSmi()) {
- Condition is_smi = masm()->CheckSmi(right->reg());
- both_smi->Branch(is_smi);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
- left->Unuse();
- right->Unuse();
- both_smi->Jump();
- }
- }
-}
-
-
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred) {
- if (!type.IsSmi()) {
- __ JumpIfNotSmi(reg, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(reg);
- }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred) {
- if (!left_info.IsSmi() && !right_info.IsSmi()) {
- __ JumpIfNotBothSmi(left, right, deferred->entry_label());
- } else if (!left_info.IsSmi()) {
- __ JumpIfNotSmi(left, deferred->entry_label());
- } else if (!right_info.IsSmi()) {
- __ JumpIfNotSmi(right, deferred->entry_label());
- }
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
-}
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
- // Copy the type info because left and right may be overwritten.
- TypeInfo left_type_info = left->type_info();
- TypeInfo right_type_info = right->type_info();
- Token::Value op = expr->op();
- Result answer;
- // Special handling of div and mod because they use fixed registers.
- if (op == Token::DIV || op == Token::MOD) {
- // We need rax as the quotient register, rdx as the remainder
- // register, neither left nor right in rax or rdx, and left copied
- // to rax.
- Result quotient;
- Result remainder;
- bool left_is_in_rax = false;
- // Step 1: get rax for quotient.
- if ((left->is_register() && left->reg().is(rax)) ||
- (right->is_register() && right->reg().is(rax))) {
- // One or both is in rax. Use a fresh non-rdx register for
- // them.
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (fresh.reg().is(rdx)) {
- remainder = fresh;
- fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- }
- if (left->is_register() && left->reg().is(rax)) {
- quotient = *left;
- *left = fresh;
- left_is_in_rax = true;
- }
- if (right->is_register() && right->reg().is(rax)) {
- quotient = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rax);
- } else {
- // Neither left nor right is in rax.
- quotient = allocator_->Allocate(rax);
- }
- ASSERT(quotient.is_register() && quotient.reg().is(rax));
- ASSERT(!(left->is_register() && left->reg().is(rax)));
- ASSERT(!(right->is_register() && right->reg().is(rax)));
-
- // Step 2: get rdx for remainder if necessary.
- if (!remainder.is_valid()) {
- if ((left->is_register() && left->reg().is(rdx)) ||
- (right->is_register() && right->reg().is(rdx))) {
- Result fresh = allocator_->Allocate();
- ASSERT(fresh.is_valid());
- if (left->is_register() && left->reg().is(rdx)) {
- remainder = *left;
- *left = fresh;
- }
- if (right->is_register() && right->reg().is(rdx)) {
- remainder = *right;
- *right = fresh;
- }
- __ movq(fresh.reg(), rdx);
- } else {
- // Neither left nor right is in rdx.
- remainder = allocator_->Allocate(rdx);
- }
- }
- ASSERT(remainder.is_register() && remainder.reg().is(rdx));
- ASSERT(!(left->is_register() && left->reg().is(rdx)));
- ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
- left->ToRegister();
- right->ToRegister();
- frame_->Spill(rax);
- frame_->Spill(rdx);
-
- // Check that left and right are smi tagged.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- (op == Token::DIV) ? rax : rdx,
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- if (op == Token::DIV) {
- __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = quotient;
- } else {
- ASSERT(op == Token::MOD);
- __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- answer = remainder;
- }
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Special handling of shift operations because they use fixed
- // registers.
- if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
- // Move left out of rcx if necessary.
- if (left->is_register() && left->reg().is(rcx)) {
- *left = allocator_->Allocate();
- ASSERT(left->is_valid());
- __ movq(left->reg(), rcx);
- }
- right->ToRegister(rcx);
- left->ToRegister();
- ASSERT(left->is_register() && !left->reg().is(rcx));
- ASSERT(right->is_register() && right->reg().is(rcx));
-
- // We will modify right, so it must be spilled.
- frame_->Spill(rcx);
-
- // Use a fresh answer register to avoid spilling the left operand.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
- // Check that both operands are smis using the answer register as a
- // temporary.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- rcx,
- overwrite_mode);
-
- Label do_op;
- // Left operand must be unchanged in left->reg() for deferred code.
- // Left operand is in answer.reg(), possibly converted to int32, for
- // inline code.
- __ movq(answer.reg(), left->reg());
- if (right_type_info.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(right->reg());
- }
- // If left is not known to be a smi, check if it is.
- // If left is not known to be a number, and it isn't a smi, check if
- // it is a HeapNumber.
- if (!left_type_info.IsSmi()) {
- __ JumpIfSmi(answer.reg(), &do_op);
- if (!left_type_info.IsNumber()) {
- // Branch if not a heapnumber.
- __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
- Factory::heap_number_map());
- deferred->Branch(not_equal);
- }
- // Load integer value into answer register using truncation.
- __ cvttsd2si(answer.reg(),
- FieldOperand(answer.reg(), HeapNumber::kValueOffset));
- // Branch if we might have overflowed.
- // (False negative for Smi::kMinValue)
- __ cmpl(answer.reg(), Immediate(0x80000000));
- deferred->Branch(equal);
- // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
- __ Integer32ToSmi(answer.reg(), answer.reg());
- } else {
- // Fast case - both are actually smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left->reg());
- }
- }
- } else {
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
- left_type_info, right_type_info, deferred);
- }
- __ bind(&do_op);
-
- // Perform the operation.
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
- break;
- case Token::SHR: {
- __ SmiShiftLogicalRight(answer.reg(),
- answer.reg(),
- rcx,
- deferred->entry_label());
- break;
- }
- case Token::SHL: {
- __ SmiShiftLeft(answer.reg(),
- answer.reg(),
- rcx);
- break;
- }
- default:
- UNREACHABLE();
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
- }
-
- // Handle the other binary operations.
- left->ToRegister();
- right->ToRegister();
- // A newly allocated register answer is used to hold the answer. The
- // registers containing left and right are not modified so they don't
- // need to be spilled in the fast case.
- answer = allocator_->Allocate();
- ASSERT(answer.is_valid());
-
- // Perform the smi tag check.
- DeferredInlineBinaryOperation* deferred =
- new DeferredInlineBinaryOperation(op,
- answer.reg(),
- left->reg(),
- right->reg(),
- overwrite_mode);
- JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
- left_type_info, right_type_info, deferred);
-
- switch (op) {
- case Token::ADD:
- __ SmiAdd(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::SUB:
- __ SmiSub(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
-
- case Token::MUL: {
- __ SmiMul(answer.reg(),
- left->reg(),
- right->reg(),
- deferred->entry_label());
- break;
- }
-
- case Token::BIT_OR:
- __ SmiOr(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_AND:
- __ SmiAnd(answer.reg(), left->reg(), right->reg());
- break;
-
- case Token::BIT_XOR:
- __ SmiXor(answer.reg(), left->reg(), right->reg());
- break;
-
- default:
- UNREACHABLE();
- break;
- }
- deferred->BindExit();
- left->Unuse();
- right->Unuse();
- ASSERT(answer.is_valid());
- return answer;
-}
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- Register dst,
- Register src,
- Smi* value,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- src_(src),
- value_(value),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperation");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Register src_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
- // For mod we don't generate all the Smi code inline.
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, src_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
- DeferredInlineSmiOperationReversed(Token::Value op,
- Register dst,
- Smi* value,
- Register src,
- OverwriteMode overwrite_mode)
- : op_(op),
- dst_(dst),
- value_(value),
- src_(src),
- overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiOperationReversed");
- }
-
- virtual void Generate();
-
- private:
- Token::Value op_;
- Register dst_;
- Smi* value_;
- Register src_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
- GenericBinaryOpStub stub(
- op_,
- overwrite_mode_,
- NO_SMI_CODE_IN_STUB);
- stub.GenerateCall(masm_, value_, src_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The result of value + src is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
- DeferredInlineSmiAddReversed(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAddReversed");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, value_, dst_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
- igostub.GenerateCall(masm_, dst_, value_);
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
- // Generate inline code for a binary operation when one of the
- // operands is a constant smi. Consumes the argument "operand".
- if (IsUnsafeSmi(value)) {
- Result unsafe_operand(value);
- if (reversed) {
- return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
- overwrite_mode);
- } else {
- return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
- overwrite_mode);
- }
- }
-
- // Get the literal value.
- Smi* smi_value = Smi::cast(*value);
- int int_value = smi_value->value();
-
- Token::Value op = expr->op();
- Result answer;
- switch (op) {
- case Token::ADD: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred = NULL;
- if (reversed) {
- deferred = new DeferredInlineSmiAddReversed(operand->reg(),
- smi_value,
- overwrite_mode);
- } else {
- deferred = new DeferredInlineSmiAdd(operand->reg(),
- smi_value,
- overwrite_mode);
- }
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiAddConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- case Token::SUB: {
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- answer = *operand;
- DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- // A smi currently fits in a 32-bit Immediate.
- __ SmiSubConstant(operand->reg(),
- operand->reg(),
- smi_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
- }
-
- case Token::SAR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftArithmeticRightConstant(operand->reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- answer = *operand;
- }
- break;
-
- case Token::SHR:
- if (reversed) {
- Result constant_operand(value);
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
- deferred->BindExit();
- operand->Unuse();
- }
- break;
-
- case Token::SHL:
- if (reversed) {
- operand->ToRegister();
-
- // We need rcx to be available to hold operand, and to be spilled.
- // SmiShiftLeft implicitly modifies rcx.
- if (operand->reg().is(rcx)) {
- frame_->Spill(operand->reg());
- answer = allocator()->Allocate();
- } else {
- Result rcx_reg = allocator()->Allocate(rcx);
- // answer must not be rcx.
- answer = allocator()->Allocate();
- // rcx_reg goes out of scope.
- }
-
- DeferredInlineSmiOperationReversed* deferred =
- new DeferredInlineSmiOperationReversed(op,
- answer.reg(),
- smi_value,
- operand->reg(),
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
-
- __ Move(answer.reg(), smi_value);
- __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
- operand->Unuse();
-
- deferred->BindExit();
- } else {
- // Only the least significant 5 bits of the shift value are used.
- // In the slow case, this masking is done inside the runtime call.
- int shift_value = int_value & 0x1f;
- operand->ToRegister();
- if (shift_value == 0) {
- // Spill operand so it can be overwritten in the slow case.
- frame_->Spill(operand->reg());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- deferred->BindExit();
- answer = *operand;
- } else {
- // Use a fresh temporary for nonzero shift values.
- answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- DeferredInlineSmiOperation* deferred =
- new DeferredInlineSmiOperation(op,
- answer.reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- __ SmiShiftLeftConstant(answer.reg(),
- operand->reg(),
- shift_value);
- deferred->BindExit();
- operand->Unuse();
- }
- }
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND: {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- if (reversed) {
- // Bit operations with a constant smi are commutative.
- // We can swap left and right operands with no problem.
- // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
- overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
- }
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
- deferred);
- if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
- } else if (op == Token::BIT_XOR) {
- if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
- }
- } else {
- ASSERT(op == Token::BIT_OR);
- if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
- }
- }
- deferred->BindExit();
- answer = *operand;
- break;
- }
-
- // Generate inline code for mod of powers of 2 and negative powers of 2.
- case Token::MOD:
- if (!reversed &&
- int_value != 0 &&
- (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
- operand->ToRegister();
- frame_->Spill(operand->reg());
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
- __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
- if (int_value < 0) int_value = -int_value;
- if (int_value == 1) {
- __ Move(operand->reg(), Smi::FromInt(0));
- } else {
- __ SmiAndConstant(operand->reg(),
- operand->reg(),
- Smi::FromInt(int_value - 1));
- }
- deferred->BindExit();
- answer = *operand;
- break; // This break only applies if we generated code for MOD.
- }
- // Fall through if we did not find a power of 2 on the right hand side!
- // The next case must be the default.
-
- default: {
- Result constant_operand(value);
- if (reversed) {
- answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
- overwrite_mode);
- } else {
- answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
- overwrite_mode);
- }
- break;
- }
- }
- ASSERT(answer.is_valid());
- return answer;
-}
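For the reversed bit operations handled above, the overwrite mode is swapped with (2 * overwrite_mode) % 3, mapping 0->0, 1->2, 2->1 so that the left and right overwrite modes trade places while no-overwrite stays put. A quick standalone check of that arithmetic, assuming the enum values 0, 1 and 2 implied by the comment (not code from V8):

#include <cassert>

int main() {
  for (int mode = 0; mode < 3; ++mode) {
    int swapped = (2 * mode) % 3;
    // 0 -> 0, 1 -> 2, 2 -> 1: the two overwrite modes trade places.
    assert(swapped == (mode == 0 ? 0 : 3 - mode));
  }
  return 0;
}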
-
-
-static bool CouldBeNaN(const Result& result) {
- if (result.type_info().IsSmi()) return false;
- if (result.type_info().IsInteger32()) return false;
- if (!result.is_constant()) return true;
- if (!result.handle()->IsHeapNumber()) return false;
- return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
- switch (cc) {
- case less: return below;
- case equal: return equal;
- case less_equal: return below_equal;
- case greater: return above;
- case greater_equal: return above_equal;
- default: UNREACHABLE();
- }
- UNREACHABLE();
- return equal;
-}
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
- bool inline_number_compare) {
- CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
- if (nan_info == kCantBothBeNaN) {
- flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
- }
- if (inline_number_compare) {
- flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
- }
- return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* dest) {
- // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == equal);
-
- Result left_side;
- Result right_side;
- // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == greater || cc == less_equal) {
- cc = ReverseCondition(cc);
- left_side = frame_->Pop();
- right_side = frame_->Pop();
- } else {
- right_side = frame_->Pop();
- left_side = frame_->Pop();
- }
- ASSERT(cc == less || cc == equal || cc == greater_equal);
-
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi = false;
- bool left_side_constant_null = false;
- bool left_side_constant_1_char_string = false;
- if (left_side.is_constant()) {
- left_side_constant_smi = left_side.handle()->IsSmi();
- left_side_constant_null = left_side.handle()->IsNull();
- left_side_constant_1_char_string =
- (left_side.handle()->IsString() &&
- String::cast(*left_side.handle())->length() == 1 &&
- String::cast(*left_side.handle())->IsAsciiRepresentation());
- }
- bool right_side_constant_smi = false;
- bool right_side_constant_null = false;
- bool right_side_constant_1_char_string = false;
- if (right_side.is_constant()) {
- right_side_constant_smi = right_side.handle()->IsSmi();
- right_side_constant_null = right_side.handle()->IsNull();
- right_side_constant_1_char_string =
- (right_side.handle()->IsString() &&
- String::cast(*right_side.handle())->length() == 1 &&
- String::cast(*right_side.handle())->IsAsciiRepresentation());
- }
-
- if (left_side_constant_smi || right_side_constant_smi) {
- bool is_loop_condition = (node->AsExpression() != NULL) &&
- node->AsExpression()->is_loop_condition();
- ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
- left_side_constant_smi, right_side_constant_smi,
- is_loop_condition);
- } else if (left_side_constant_1_char_string ||
- right_side_constant_1_char_string) {
- if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
- // Trivial case, comparing two constants.
- int left_value = String::cast(*left_side.handle())->Get(0);
- int right_value = String::cast(*right_side.handle())->Get(0);
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant 1 character string.
- // If left side is a constant 1-character string, reverse the operands.
- // Since one side is a constant string, conversion order does not matter.
- if (left_side_constant_1_char_string) {
- Result temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may reintroduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant string, inlining the case
- // where both sides are strings.
- left_side.ToRegister();
-
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_not_string, is_string;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
- ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
- Condition is_smi = masm()->CheckSmi(left_reg);
- is_not_string.Branch(is_smi, &left_side);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(left_reg, HeapObject::kMapOffset));
- __ movzxbl(temp.reg(),
- FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
- // If we are testing for equality then make use of the symbol shortcut.
- // Check if the left hand side has the same type as the right hand
- // side (which is always a symbol).
- if (cc == equal) {
- Label not_a_symbol;
- STATIC_ASSERT(kSymbolTag != 0);
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
- __ j(zero, &not_a_symbol);
- // They are symbols, so do identity compare.
- __ Cmp(left_reg, right_side.handle());
- dest->true_target()->Branch(equal);
- dest->false_target()->Branch(not_equal);
- __ bind(&not_a_symbol);
- }
- // Call the compare stub if the left side is not a flat ascii string.
- __ andb(temp.reg(),
- Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask));
- __ cmpb(temp.reg(),
- Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- temp.Unuse();
- is_string.Branch(equal, &left_side);
-
- // Set up and call the compare stub.
- is_not_string.Bind(&left_side);
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_string.Bind(&left_side);
- // left_side is a sequential ASCII string.
- ASSERT(left_side.reg().is(left_reg));
- right_side = Result(right_val);
- Result temp2 = allocator_->Allocate();
- ASSERT(temp2.is_valid());
- // Test string equality and comparison.
- if (cc == equal) {
- Label comparison_done;
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ j(not_equal, &comparison_done);
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_val)->Get(0));
- __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
- Immediate(char_value));
- __ bind(&comparison_done);
- } else {
- __ movq(temp2.reg(),
- FieldOperand(left_side.reg(), String::kLengthOffset));
- __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
- Label comparison;
- // If the length is 0 then the subtraction gave -1 which compares less
- // than any character.
- __ j(negative, &comparison);
- // Otherwise load the first character.
- __ movzxbl(temp2.reg(),
- FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
- __ bind(&comparison);
- // Compare the first character of the string with the
- // constant 1-character string.
- uint8_t char_value =
- static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
- __ cmpb(temp2.reg(), Immediate(char_value));
- Label characters_were_different;
- __ j(not_equal, &characters_were_different);
- // If the first character is the same then the long string sorts after
- // the short one.
- __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
- Smi::FromInt(1));
- __ bind(&characters_were_different);
- }
- temp2.Unuse();
- left_side.Unuse();
- right_side.Unuse();
- dest->Split(cc);
- }
- } else {
- // Neither side is a constant Smi, constant 1-char string, or constant null.
- // If either side is a non-smi constant, or known to be a heap number,
- // skip the smi check.
- bool known_non_smi =
- (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
- (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
- left_side.type_info().IsDouble() ||
- right_side.type_info().IsDouble();
-
- NaNInformation nan_info =
- (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
- kBothCouldBeNaN :
- kCantBothBeNaN;
-
- // Inline number comparison handling any combination of smi's and heap
- // numbers if:
- // code is in a loop
- // the compare operation is different from equal
- // compare is not a for-loop comparison
- // The reason for excluding equal is that it will most likely be done
- // with smis (not heap numbers), and the code for comparing smis is
- // inlined separately. The same reason applies to the for-loop comparison,
- // which will also most likely be a smi comparison.
- bool is_loop_condition = (node->AsExpression() != NULL)
- && node->AsExpression()->is_loop_condition();
- bool inline_number_compare =
- loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
- // Left and right needed in registers for the following code.
- left_side.ToRegister();
- right_side.ToRegister();
-
- if (known_non_smi) {
- // Inlined equality check:
- // If at least one of the objects is not NaN, then if the objects
- // are identical, they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag.
- answer.Unuse();
- dest->Split(cc);
- } else {
- // Here we split control flow to the stub call and inlined cases
- // before finally splitting it to the control destination. We use
- // a jump target and branching to duplicate the virtual frame at
- // the first split. We manually handle the off-frame references
- // by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Register right_reg = right_side.reg();
-
- // In-line check for comparing two smis.
- JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
- if (has_valid_frame()) {
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
- }
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareFlags flags =
- ComputeCompareFlags(nan_info, inline_number_compare);
- CompareStub stub(cc, strict, flags);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- if (is_smi.is_linked()) {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
- } else {
- dest->Split(cc);
- }
- }
-
- if (is_smi.is_linked()) {
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* dest,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition) {
- if (left_side_constant_smi && right_side_constant_smi) {
- // Trivial case, comparing two constants.
- int left_value = Smi::cast(*left_side->handle())->value();
- int right_value = Smi::cast(*right_side->handle())->value();
- switch (cc) {
- case less:
- dest->Goto(left_value < right_value);
- break;
- case equal:
- dest->Goto(left_value == right_value);
- break;
- case greater_equal:
- dest->Goto(left_value >= right_value);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // Only one side is a constant Smi.
- // If left side is a constant Smi, reverse the operands.
- // Since one side is a constant Smi, conversion order does not matter.
- if (left_side_constant_smi) {
- Result* temp = left_side;
- left_side = right_side;
- right_side = temp;
- cc = ReverseCondition(cc);
- // This may re-introduce greater or less_equal as the value of cc.
- // CompareStub and the inline code both support all values of cc.
- }
- // Implement comparison against a constant Smi, inlining the case
- // where both sides are Smis.
- left_side->ToRegister();
- Register left_reg = left_side->reg();
- Smi* constant_smi = Smi::cast(*right_side->handle());
-
- if (left_side->is_smi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left_reg);
- }
- // Test smi equality and comparison by signed int comparison.
- // Both sides are smis, so we can use an Immediate.
- __ SmiCompare(left_reg, constant_smi);
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- } else {
- // Only the case where the left side could possibly be a non-smi is left.
- JumpTarget is_smi;
- if (cc == equal) {
- // We can do the equality comparison before the smi check.
- __ SmiCompare(left_reg, constant_smi);
- dest->true_target()->Branch(equal);
- Condition left_is_smi = masm_->CheckSmi(left_reg);
- dest->false_target()->Branch(left_is_smi);
- } else {
- // Do the smi check, then the comparison.
- Condition left_is_smi = masm_->CheckSmi(left_reg);
- is_smi.Branch(left_is_smi, left_side, right_side);
- }
-
- // Jump or fall through to here if we are comparing a non-smi to a
- // constant smi. If the non-smi is a heap number and this is not
- // a loop condition, inline the floating point code.
- if (!is_loop_condition) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- JumpTarget not_number;
- __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Factory::heap_number_map());
- not_number.Branch(not_equal, left_side);
- __ movsd(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = constant_smi->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ movl(temp.reg(), Immediate(value));
- __ cvtlsi2sd(xmm0, temp.reg());
- temp.Unuse();
- }
- __ ucomisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, left_side);
- left_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
- not_number.Bind(left_side);
- }
-
-      // Set up and call the compare stub.
- CompareFlags flags =
- static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
- CompareStub stub(cc, strict, flags);
- Result result = frame_->CallStub(&stub, left_side, right_side);
- result.ToRegister();
- __ testq(result.reg(), result.reg());
- result.Unuse();
- if (cc == equal) {
- dest->Split(cc);
- } else {
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
-        // For performance, it is important that this case comes last.
- is_smi.Bind(left_side, right_side);
- __ SmiCompare(left_reg, constant_smi);
- left_side->Unuse();
- right_side->Unuse();
- dest->Split(cc);
- }
- }
- }
-}
-
-
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
- Result* operand,
- XMMRegister xmm_reg,
- Result* left_side,
- Result* right_side,
- JumpTarget* not_numbers) {
- Label done;
- if (operand->type_info().IsDouble()) {
- // Operand is known to be a heap number, just load it.
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- } else if (operand->type_info().IsSmi()) {
- // Operand is known to be a smi. Convert it to double and keep the original
- // smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- } else {
- // Operand type not known, check for smi or heap number.
- Label smi;
- __ JumpIfSmi(operand->reg(), &smi);
- if (!operand->type_info().IsNumber()) {
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
- kScratchRegister);
- not_numbers->Branch(not_equal, left_side, right_side, taken);
- }
- __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&smi);
-    // Convert smi to double and keep the original smi.
- __ SmiToInteger32(kScratchRegister, operand->reg());
- __ cvtlsi2sd(xmm_reg, kScratchRegister);
- __ jmp(&done);
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest) {
- ASSERT(left_side->is_register());
- ASSERT(right_side->is_register());
-
- JumpTarget not_numbers;
-  // Load the left and right operands into xmm0 and xmm1 and compare.
- LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
- &not_numbers);
- LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
- &not_numbers);
- __ ucomisd(xmm0, xmm1);
- // Bail out if a NaN is involved.
- not_numbers.Branch(parity_even, left_side, right_side);
-
- // Split to destination targets based on comparison.
- left_side->Unuse();
- right_side->Unuse();
- dest->true_target()->Branch(DoubleCondition(cc));
- dest->false_target()->Jump();
-
- not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- // Push the arguments ("left-to-right") on the stack.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Record the position for debugging purposes.
- CodeForSourcePosition(position);
-
- // Use the shared code stub to call the function.
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, flags);
- Result answer = frame_->CallStub(&call_function, arg_count + 1);
- // Restore context and replace function on the stack with the
- // result of the stub invocation.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments).
- // If the arguments object of the scope has not been allocated,
- // and x.apply is Function.prototype.apply, this optimization
- // just copies y and the arguments of the current function on the
- // stack, as receiver and arguments, and calls x.
- // In the implementation comments, we call x the applicand
- // and y the receiver.
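-  // For example, a function like
-  //   function outer() { return target.apply(this, arguments); }
-  // can then be compiled without ever materializing the arguments object.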
- ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
- ASSERT(arguments->IsArguments());
-
- // Load applicand.apply onto the stack. This will usually
- // give us a megamorphic load site. Not super, but it works.
- Load(applicand);
- frame()->Dup();
- Handle<String> name = Factory::LookupAsciiSymbol("apply");
- frame()->Push(name);
- Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
- __ nop();
- frame()->Push(&answer);
-
- // Load the receiver and the existing arguments object onto the
- // expression stack. Avoid allocating the arguments object here.
- Load(receiver);
- LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
- // Emit the source position information after having loaded the
- // receiver and the arguments.
- CodeForSourcePosition(position);
- // Contents of frame at this point:
- // Frame[0]: arguments object of the current function or the hole.
- // Frame[1]: receiver
- // Frame[2]: applicand.apply
- // Frame[3]: applicand.
-
- // Check if the arguments object has been lazily allocated
- // already. If so, just use that instead of copying the arguments
- // from the stack. This also deals with cases where a local variable
- // named 'arguments' has been introduced.
- frame_->Dup();
- Result probe = frame_->Pop();
- { VirtualFrame::SpilledScope spilled_scope;
- Label slow, done;
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsArgumentsMarker();
- } else {
- __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
- probe.Unuse();
- __ j(not_equal, &slow);
- }
-
- if (try_lazy) {
- Label build_args;
- // Get rid of the arguments object probe.
- frame_->Drop(); // Can be called on a spilled frame.
- // Stack now has 3 elements on it.
- // Contents of stack at this point:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // Check that the receiver really is a JavaScript object.
- __ movq(rax, Operand(rsp, 0));
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- // We allow all JSObjects including JSFunctions. As long as
- // JS_FUNCTION_TYPE is the last instance type and it is right
- // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
- // bound.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &build_args);
-
- // Check that applicand.apply is Function.prototype.apply.
- __ movq(rax, Operand(rsp, kPointerSize));
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
- __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
- __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(rcx, apply_code);
- __ j(not_equal, &build_args);
-
- // Check that applicand is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- is_smi = masm_->CheckSmi(rdi);
- __ j(is_smi, &build_args);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &build_args);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ Set(rax, scope()->num_parameters());
- for (int i = 0; i < scope()->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ SmiToInteger32(rax,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movl(rcx, rax);
- __ cmpl(rax, Immediate(kArgumentsLimit));
- __ j(above, &build_args);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- // rcx is a small non-negative integer, due to the test above.
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- // Drop applicand.apply and applicand from the stack, and push
- // the result of the function call, but leave the spilled frame
- // unchanged, with 3 elements, so it is correct when we compile the
- // slow-case code.
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rax);
- // Stack now has 1 element:
- // rsp[0]: result
- __ jmp(&done);
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // applicand.apply.
- __ bind(&build_args);
-      // Stack now has 3 elements, because we jumped here from a point where:
- // rsp[0]: receiver
- // rsp[1]: applicand.apply
- // rsp[2]: applicand.
-
- // StoreArgumentsObject requires a correct frame, and may modify it.
- Result arguments_object = StoreArgumentsObject(false);
- frame_->SpillAll();
- arguments_object.ToRegister();
- frame_->EmitPush(arguments_object.reg());
- arguments_object.Unuse();
- // Stack and frame now have 4 elements.
- __ bind(&slow);
- }
-
- // Generic computation of x.apply(y, args) with no special optimization.
- // Flip applicand.apply and applicand on the stack, so
- // applicand looks like the receiver of the applicand.apply call.
- // Then process it as a normal function call.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- // The function and its two arguments have been dropped.
- frame_->Drop(1); // Drop the receiver as well.
- res.ToRegister();
- frame_->EmitPush(res.reg());
- // Stack now has 1 element:
- // rsp[0]: result
- if (try_lazy) __ bind(&done);
- } // End of spilled scope.
- // Restore the context register after a call.
- frame_->RestoreContextRegister();
-}
-
-
-class DeferredStackCheck: public DeferredCode {
- public:
- DeferredStackCheck() {
- set_comment("[ DeferredStackCheck");
- }
-
- virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
- StackCheckStub stub;
- __ CallStub(&stub);
-}
-
-
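-// The stack check itself is a single compare-and-branch against the stack
-// limit root; the call to StackCheckStub lives in deferred (out-of-line)
-// code so the common, non-overflowing path stays short.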
-void CodeGenerator::CheckStack() {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- deferred->Branch(below);
- deferred->BindExit();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- Visit(statement);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(in_spilled_code());
- set_in_spilled_code(false);
- VisitStatements(statements);
- if (frame_ != NULL) {
- frame_->SpillAll();
- }
- set_in_spilled_code(true);
-
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- ASSERT(!in_spilled_code());
- for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
- Visit(statements->at(i));
- }
- ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ Block");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- VisitStatements(node->statements());
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals. The inevitable call
- // will sync frame elements to memory anyway, so we do it eagerly to
- // allow us to push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(rsi); // The context is the first argument.
- frame_->EmitPush(kScratchRegister);
- frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
- // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = node->proxy()->var();
- ASSERT(var != NULL); // must have been resolved
- Slot* slot = var->AsSlot();
-
- // If it was not possible to allocate the variable at compile time,
- // we need to "declare" it at runtime to make sure it actually
- // exists in the local context.
- if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Variables with a "LOOKUP" slot were introduced as non-locals
- // during variable resolution and must have mode DYNAMIC.
- ASSERT(var->is_dynamic());
- // For now, just do a runtime call. Sync the virtual frame eagerly
- // so we can simply push the arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
- PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (node->mode() == Variable::CONST) {
- frame_->EmitPush(Heap::kTheHoleValueRootIndex);
- } else if (node->fun() != NULL) {
- Load(node->fun());
- } else {
- frame_->EmitPush(Smi::FromInt(0)); // no initial value!
- }
- Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
- // Ignore the return value (declarations are statements).
- return;
- }
-
- ASSERT(!var->is_global());
-
- // If we have a function or a constant, we need to initialize the variable.
- Expression* val = NULL;
- if (node->mode() == Variable::CONST) {
- val = new Literal(Factory::the_hole_value());
- } else {
- val = node->fun(); // NULL if we don't have a function
- }
-
- if (val != NULL) {
- {
- // Set the initial value.
- Reference target(this, node->proxy());
- Load(val);
- target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
- }
- // Get rid of the assigned value (declarations are statements).
- frame_->Drop();
- }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- Load(expression);
- // Remove the lingering expression result from the top of stack.
- frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "// EmptyStatement");
- CodeForStatementPosition(node);
- // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ IfStatement");
- // Generate different code depending on which parts of the if statement
- // are present or not.
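-  // The condition is compiled with a ControlDestination so that, where
-  // possible, e.g. for "if (a < b) ...", the comparison branches directly
-  // to the then/else code instead of first materializing a boolean value.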
- bool has_then_stm = node->HasThenStatement();
- bool has_else_stm = node->HasElseStatement();
-
- CodeForStatementPosition(node);
- JumpTarget exit;
- if (has_then_stm && has_else_stm) {
- JumpTarget then;
- JumpTarget else_;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Visit(node->else_statement());
-
- // We may have dangling jumps to the then part.
- if (then.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Visit(node->then_statement());
-
- if (else_.is_linked()) {
- if (has_valid_frame()) exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- }
-
- } else if (has_then_stm) {
- ASSERT(!has_else_stm);
- JumpTarget then;
- ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // then part.
- if (then.is_linked()) {
- exit.Unuse();
- exit.Jump();
- then.Bind();
- Visit(node->then_statement());
- }
- } else {
- // The then label was bound.
- Visit(node->then_statement());
- }
-
- } else if (has_else_stm) {
- ASSERT(!has_then_stm);
- JumpTarget else_;
- ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.true_was_fall_through()) {
- // The exit label was bound. We may have dangling jumps to the
- // else part.
- if (else_.is_linked()) {
- exit.Unuse();
- exit.Jump();
- else_.Bind();
- Visit(node->else_statement());
- }
- } else {
- // The else label was bound.
- Visit(node->else_statement());
- }
-
- } else {
- ASSERT(!has_then_stm && !has_else_stm);
- // We only care about the condition's side effects (not its value
- // or control flow effect). LoadCondition is called without
- // forcing control flow.
- ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), &dest, false);
- if (!dest.is_used()) {
- // We got a value on the frame rather than (or in addition to)
- // control flow.
- frame_->Drop();
- }
- }
-
- if (exit.is_linked()) {
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ContinueStatement");
- CodeForStatementPosition(node);
- node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ BreakStatement");
- CodeForStatementPosition(node);
- node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- Load(node->expression());
- Result return_value = frame_->Pop();
- masm()->positions_recorder()->WriteRecordedPositions();
- if (function_return_is_shadowed_) {
- function_return_.Jump(&return_value);
- } else {
- frame_->PrepareForReturn();
- if (function_return_.is_bound()) {
- // If the function return label is already bound we reuse the
- // code by jumping to the return site.
- function_return_.Jump(&return_value);
- } else {
- function_return_.Bind(&return_value);
- GenerateReturnSequence(&return_value);
- }
- }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
- // The return value is a live (but not currently reference counted)
- // reference to rax. This is safe because the current frame does not
- // contain a reference to rax (it is prepared for the return by spilling
- // all registers).
- if (FLAG_trace) {
- frame_->Push(return_value);
- *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
- }
- return_value->ToRegister(rax);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
- // Leave the frame and return popping the arguments and the
- // receiver.
- frame_->Exit();
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, rcx);
- DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint.
- // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
- // with length 7 (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithEnterStatement");
- CodeForStatementPosition(node);
- Load(node->expression());
- Result context;
- if (node->is_catch_block()) {
- context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kPushContext, 1);
- }
-
- // Update context local.
- frame_->SaveContextRegister();
-
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WithExitStatement");
- CodeForStatementPosition(node);
- // Pop context.
- __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
- // Update context local.
- frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ SwitchStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- // Compile the switch value.
- Load(node->tag());
-
- ZoneList<CaseClause*>* cases = node->cases();
- int length = cases->length();
- CaseClause* default_clause = NULL;
-
- JumpTarget next_test;
- // Compile the case label expressions and comparisons. Exit early
- // if a comparison is unconditionally true. The target next_test is
- // bound before the loop in order to indicate control flow to the
- // first comparison.
- next_test.Bind();
- for (int i = 0; i < length && !next_test.is_unused(); i++) {
- CaseClause* clause = cases->at(i);
- // The default is not a test, but remember it for later.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- // We recycle the same target next_test for each test. Bind it if
- // the previous test has not done so and then unuse it for the
- // loop.
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- next_test.Unuse();
-
- // Duplicate the switch value.
- frame_->Dup();
-
- // Compile the label expression.
- Load(clause->label());
-
- // Compare and branch to the body if true or the next test if
- // false. Prefer the next test as a fall through.
- ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(node, equal, true, &dest);
-
- // If the comparison fell through to the true target, jump to the
- // actual body.
- if (dest.true_was_fall_through()) {
- clause->body_target()->Unuse();
- clause->body_target()->Jump();
- }
- }
-
- // If there was control flow to a next test from the last one
- // compiled, compile a jump to the default or break target.
- if (!next_test.is_unused()) {
- if (next_test.is_linked()) {
- next_test.Bind();
- }
- // Drop the switch value.
- frame_->Drop();
- if (default_clause != NULL) {
- default_clause->body_target()->Jump();
- } else {
- node->break_target()->Jump();
- }
- }
-
- // The last instruction emitted was a jump, either to the default
- // clause or the break target, or else to a case body from the loop
- // that compiles the tests.
- ASSERT(!has_valid_frame());
- // Compile case bodies as needed.
- for (int i = 0; i < length; i++) {
- CaseClause* clause = cases->at(i);
-
- // There are two ways to reach the body: from the corresponding
- // test or as the fall through of the previous body.
- if (clause->body_target()->is_linked() || has_valid_frame()) {
- if (clause->body_target()->is_linked()) {
- if (has_valid_frame()) {
-          // If we have both a jump to the body from its test and a fall
-          // through from the previous body, put a jump on the fall-through
-          // path to avoid dropping the switch value on the test path. The
-          // exception is the default, which has already had the switch
-          // value dropped.
- if (clause->is_default()) {
- clause->body_target()->Bind();
- } else {
- JumpTarget body;
- body.Jump();
- clause->body_target()->Bind();
- frame_->Drop();
- body.Bind();
- }
- } else {
- // No fall through to worry about.
- clause->body_target()->Bind();
- if (!clause->is_default()) {
- frame_->Drop();
- }
- }
- } else {
- // Otherwise, we have only fall through.
- ASSERT(has_valid_frame());
- }
-
- // We are now prepared to compile the body.
- Comment cmnt(masm_, "[ Case body");
- VisitStatements(clause->statements());
- }
- clause->body_target()->Unuse();
- }
-
- // We may not have a valid frame here so bind the break target only
- // if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DoWhileStatement");
- CodeForStatementPosition(node);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
-
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- // Label the top of the loop for the backward jump if necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case ALWAYS_FALSE:
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- break;
- case DONT_KNOW:
- // Continue is the test, so use the backward body target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- switch (info) {
- case ALWAYS_TRUE:
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case ALWAYS_FALSE:
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- case DONT_KNOW:
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- Comment cmnt(masm_, "[ DoWhileCondition");
- CodeForDoWhileConditionPosition(node);
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- break;
- }
-
- DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ WhileStatement");
- CodeForStatementPosition(node);
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the test
- // at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The body
- // is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the bottom,
- // jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
- ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
- if (slot->type() == Slot::LOCAL) {
- frame_->SetTypeForLocalAt(slot->index(), info);
- } else {
- frame_->SetTypeForParamAt(slot->index(), info);
- }
- if (FLAG_debug_code && info.IsSmi()) {
- if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
- } else {
- frame_->PushParameterAt(slot->index());
- }
- Result var = frame_->Pop();
- var.ToRegister();
- __ AbortIfNotSmi(var.reg());
- }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
- // A fast smi loop is a for loop with an initializer
- // that is a simple assignment of a smi to a stack variable,
- // a test that is a simple test of that variable against a smi constant,
-  // and a step that is an increment/decrement of the variable, and
- // where the variable isn't modified in the loop body.
- // This guarantees that the variable is always a smi.
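-  // For example: for (var i = 0; i < 100; i++) { ... } where the body
-  // never assigns to i.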
-
- Variable* loop_var = node->loop_variable();
- Smi* initial_value = *Handle<Smi>::cast(node->init()
- ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
- Smi* limit_value = *Handle<Smi>::cast(
- node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
- Token::Value compare_op =
- node->cond()->AsCompareOperation()->op();
- bool increments =
- node->next()->StatementAsCountOperation()->op() == Token::INC;
-
- // Check that the condition isn't initially false.
- bool initially_false = false;
- int initial_int_value = initial_value->value();
- int limit_int_value = limit_value->value();
- switch (compare_op) {
- case Token::LT:
- initially_false = initial_int_value >= limit_int_value;
- break;
- case Token::LTE:
- initially_false = initial_int_value > limit_int_value;
- break;
- case Token::GT:
- initially_false = initial_int_value <= limit_int_value;
- break;
- case Token::GTE:
- initially_false = initial_int_value < limit_int_value;
- break;
- default:
- UNREACHABLE();
- }
- if (initially_false) return;
-
- // Only check loop condition at the end.
-
- Visit(node->init());
-
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- // Set type and stack height of BreakTargets.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- IncrementLoopNesting();
- loop.Bind();
-
- // Set number type of the loop variable to smi.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
- Visit(node->body());
-
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- if (has_valid_frame()) {
- CodeForStatementPosition(node);
- Slot* loop_var_slot = loop_var->AsSlot();
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->TakeLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->TakeParameterAt(loop_var_slot->index());
- }
- Result loop_var_result = frame_->Pop();
- if (!loop_var_result.is_register()) {
- loop_var_result.ToRegister();
- }
- Register loop_var_reg = loop_var_result.reg();
- frame_->Spill(loop_var_reg);
- if (increments) {
- __ SmiAddConstant(loop_var_reg,
- loop_var_reg,
- Smi::FromInt(1));
- } else {
- __ SmiSubConstant(loop_var_reg,
- loop_var_reg,
- Smi::FromInt(1));
- }
-
- frame_->Push(&loop_var_result);
- if (loop_var_slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(loop_var_slot->index());
- } else {
- ASSERT(loop_var_slot->type() == Slot::PARAMETER);
- frame_->StoreToParameterAt(loop_var_slot->index());
- }
- frame_->Drop();
-
- __ SmiCompare(loop_var_reg, limit_value);
- Condition condition;
- switch (compare_op) {
- case Token::LT:
- condition = less;
- break;
- case Token::LTE:
- condition = less_equal;
- break;
- case Token::GT:
- condition = greater;
- break;
- case Token::GTE:
- condition = greater_equal;
- break;
- default:
- condition = never;
- UNREACHABLE();
- }
- loop.Branch(condition);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ ForStatement");
- CodeForStatementPosition(node);
-
- if (node->is_fast_smi_loop()) {
- GenerateFastSmiLoop(node);
- return;
- }
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
- }
-
- // If the condition is always false and has no side effects, we do not
- // need to compile anything else.
- ConditionAnalysis info = AnalyzeCondition(node->cond());
- if (info == ALWAYS_FALSE) return;
-
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function literal
- // twice.
- bool test_at_bottom = !node->may_have_function_literal();
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- IncrementLoopNesting();
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- switch (info) {
- case ALWAYS_TRUE:
- // We will not compile the test expression. Label the top of the
- // loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- break;
- case DONT_KNOW: {
- if (test_at_bottom) {
- // Continue is either the update expression or the test at the
- // bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there is no
- // update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there is an
- // update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and preferred
- // fall-through and with the break target as the false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may have
- // been unconditionally false (if there are no jumps to the
- // body).
- if (!body.is_linked()) {
- DecrementLoopNesting();
- return;
- }
-
- // Otherwise, jump around the body on the fall through and then
- // bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- break;
- }
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
-
- Visit(node->body());
-
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
-
- // Control can reach the update by falling out of the body or by a
- // continue.
- if (has_valid_frame()) {
-      // Record the source position of the statement: although this code is
-      // emitted after the code for the body, it belongs to the loop
-      // statement and not to the body.
- CodeForStatementPosition(node);
- Visit(node->next());
- }
- }
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- switch (info) {
- case ALWAYS_TRUE:
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- break;
- case DONT_KNOW:
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if there
- // was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out of
- // the body, by a continue in the body, or from the update
- // expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- break;
- case ALWAYS_FALSE:
- UNREACHABLE();
- break;
- }
-
- // The break target may be already bound (by the condition), or there
- // may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ForInStatement");
- CodeForStatementPosition(node);
-
- JumpTarget primitive;
- JumpTarget jsobject;
- JumpTarget fixed_array;
- JumpTarget entry(JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check;
- JumpTarget exit;
-
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
-
- // Both SpiderMonkey and kjs ignore null and undefined in contrast
- // to the specification. 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(rax);
-
- // rax: value to be iterated over
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- exit.Branch(equal);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- exit.Branch(equal);
-
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
-
- // Check if enumerable is already a JSObject
- // rax: value to be iterated over
- Condition is_smi = masm_->CheckSmi(rax);
- primitive.Branch(is_smi);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- jsobject.Branch(above_equal);
-
- primitive.Bind();
- frame_->EmitPush(rax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in rax, which is where we want it below
-
- jsobject.Bind();
- // Get the set of properties (as a FixedArray or Map).
- // rax: value to be iterated over
- frame_->EmitPush(rax); // Push the object being iterated over.
-
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- JumpTarget call_runtime;
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
- JumpTarget check_prototype;
- JumpTarget use_cache;
- __ movq(rcx, rax);
- loop.Bind();
- // Check that there are no elements.
- __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- // Check that instance descriptors are not empty so that we can
-  // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
- call_runtime.Branch(equal);
-  // Check that there is an enum cache in the non-empty instance
- // descriptors. This is the case if the next enumeration index
- // field does not contain a smi.
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- is_smi = masm_->CheckSmi(rdx);
- call_runtime.Branch(is_smi);
- // For all objects but the receiver, check that the cache is empty.
- __ cmpq(rcx, rax);
- check_prototype.Branch(equal);
- __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
- call_runtime.Branch(not_equal);
- check_prototype.Bind();
- // Load the prototype from the map and loop if non-null.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ CompareRoot(rcx, Heap::kNullValueRootIndex);
- loop.Branch(not_equal);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- use_cache.Jump();
-
- call_runtime.Bind();
- // Call the runtime to get the property names for the object.
- frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
- // rax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ movq(rdx, rax);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
- fixed_array.Branch(not_equal);
-
- use_cache.Bind();
- // Get enum cache
- // rax: map (either the result from a call to
- // Runtime::kGetPropertyNamesFast or has been fetched directly from
- // the object)
- __ movq(rcx, rax);
- __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
- // Get the bridge array held in the enumeration index field.
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
- // Get the cache from the bridge array.
- __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- frame_->EmitPush(rax); // <- slot 3
- frame_->EmitPush(rdx); // <- slot 2
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
- entry.Jump();
-
- fixed_array.Bind();
- // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
- frame_->EmitPush(rax); // <- slot 2
-
- // Push the length of the array and the initial index onto the stack.
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
-
- // Condition.
- entry.Bind();
- // Grab the current frame's height for the break and continue
- // targets only after all the state is pushed on the frame.
- node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
- __ movq(rax, frame_->ElementAt(0)); // load the current count
- __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
- node->break_target()->Branch(below_equal);
-
- // Get the i'th entry of the array.
- __ movq(rdx, frame_->ElementAt(2));
- SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx,
- FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
-  // Get the expected map from the stack or a zero map in the
-  // permanent slow case.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
- __ movq(rdx, frame_->ElementAt(3));
- // Check if the expected map still matches that of the enumerable.
- // If not, we have to filter the key.
- // rax: current iteration count
- // rbx: i'th entry of the enum cache
- // rdx: expected map value
- __ movq(rcx, frame_->ElementAt(4));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ cmpq(rcx, rdx);
- end_del_check.Branch(equal);
-
- // Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(rbx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ movq(rbx, rax);
-
- // If the property has been removed while iterating, we just skip it.
- __ SmiCompare(rbx, Smi::FromInt(0));
- node->continue_target()->Branch(equal);
-
- end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.
-  // rbx: i'th entry of the enum cache (or string thereof)
- frame_->EmitPush(rbx);
- { Reference each(this, node->each());
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
- if (!each.is_illegal()) {
- if (each.size() > 0) {
- frame_->EmitPush(frame_->ElementAt(each.size()));
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(2); // Drop the original and the copy of the element.
- } else {
- // If the reference has size zero then we can use the value below
- // the reference as if it were above the reference, instead of pushing
- // a new copy of it above the reference.
- each.SetValue(NOT_CONST_INIT);
- frame_->Drop(); // Drop the original of the element.
- }
- }
- }
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
-
- // Body.
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
-
- // Next. Reestablish a spilled frame in case we are coming here via
- // a continue in the body.
- node->continue_target()->Bind();
- frame_->SpillAll();
- frame_->EmitPop(rax);
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- frame_->EmitPush(rax);
- entry.Jump();
-
- // Cleanup. No need to spill because VirtualFrame::Drop is safe for
- // any frame.
- node->break_target()->Bind();
- frame_->Drop(5);
-
- // Exit.
- exit.Bind();
-
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatchStatement");
- CodeForStatementPosition(node);
-
- JumpTarget try_block;
- JumpTarget exit;
-
- try_block.Call();
- // --- Catch block ---
- frame_->EmitPush(rax);
-
- // Store the caught exception in the catch variable.
- Variable* catch_var = node->catch_var()->var();
- ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
- StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
- // Remove the exception from the stack.
- frame_->Drop();
-
- VisitStatementsAndSpill(node->catch_block()->statements());
- if (has_valid_frame()) {
- exit.Jump();
- }
-
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_CATCH_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- bool has_unlinks = false;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- has_unlinks = has_unlinks || shadows[i]->is_linked();
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // Make sure that there's nothing left on the stack above the
- // handler structure.
- if (FLAG_debug_code) {
- __ movq(kScratchRegister, handler_address);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
- __ Assert(equal, "stack pointer should point to top handler");
- }
-
- // If we can fall off the end of the try block, unlink from try chain.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame. Unlink from
- // the handler list and drop the rest of this handler from the
- // frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
- if (has_unlinks) {
- exit.Jump();
- }
- }
-
- // Generate unlink code for the (formerly) shadowing targets that
- // have been jumped to. Deallocate each shadow target.
- Result return_value;
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // Unlink from try chain; be careful not to destroy the TOS if
- // there is one.
- if (i == kReturnShadowIndex) {
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that we
-      // break from (e.g., for...in) may have left stuff on the stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- if (!function_return_is_shadowed_) frame_->PrepareForReturn();
- shadows[i]->other_target()->Jump(&return_value);
- } else {
- shadows[i]->other_target()->Jump();
- }
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinallyStatement");
- CodeForStatementPosition(node);
-
- // State: Used to keep track of reason for entering the finally
- // block. Should probably be extended to hold information for
- // break/continue from within the try block.
- enum { FALLING, THROWING, JUMPING };
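-  // States from JUMPING upwards also identify which shadowed target to
-  // resume: the state pushed below for shadow target i is JUMPING + i.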
-
- JumpTarget try_block;
- JumpTarget finally_block;
-
- try_block.Call();
-
- frame_->EmitPush(rax);
- // In case of thrown exceptions, this is where we continue.
- __ Move(rcx, Smi::FromInt(THROWING));
- finally_block.Jump();
-
- // --- Try block ---
- try_block.Bind();
-
- frame_->PushTryHandler(TRY_FINALLY_HANDLER);
- int handler_height = frame_->height();
-
- // Shadow the jump targets for all escapes from the try block, including
- // returns. During shadowing, the original target is hidden as the
- // ShadowTarget and operations on the original actually affect the
- // shadowing target.
- //
- // We should probably try to unify the escaping targets and the return
- // target.
- int nof_escapes = node->escaping_targets()->length();
- List<ShadowTarget*> shadows(1 + nof_escapes);
-
- // Add the shadow target for the function return.
- static const int kReturnShadowIndex = 0;
- shadows.Add(new ShadowTarget(&function_return_));
- bool function_return_was_shadowed = function_return_is_shadowed_;
- function_return_is_shadowed_ = true;
- ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
- // Add the remaining shadow targets.
- for (int i = 0; i < nof_escapes; i++) {
- shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
- }
-
- // Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
-
- // Stop the introduced shadowing and count the number of required unlinks.
- // After shadowing stops, the original targets are unshadowed and the
- // ShadowTargets represent the formerly shadowing targets.
- int nof_unlinks = 0;
- for (int i = 0; i < shadows.length(); i++) {
- shadows[i]->StopShadowing();
- if (shadows[i]->is_linked()) nof_unlinks++;
- }
- function_return_is_shadowed_ = function_return_was_shadowed;
-
- // Get an external reference to the handler address.
- ExternalReference handler_address(Top::k_handler_address);
-
- // If we can fall off the end of the try block, unlink from the try
- // chain and set the state on the frame to FALLING.
- if (has_valid_frame()) {
- // The next handler address is on top of the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in rcx, then jump around the unlink blocks if any.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- __ Move(rcx, Smi::FromInt(FALLING));
- if (nof_unlinks > 0) {
- finally_block.Jump();
- }
- }
-
- // Generate code to unlink and set the state for the (formerly)
- // shadowing targets that have been jumped to.
- for (int i = 0; i < shadows.length(); i++) {
- if (shadows[i]->is_linked()) {
- // If we have come from the shadowed return, the return value is
- // on the virtual frame. We must preserve it until it is
- // pushed.
- if (i == kReturnShadowIndex) {
- Result return_value;
- shadows[i]->Bind(&return_value);
- return_value.ToRegister(rax);
- } else {
- shadows[i]->Bind();
- }
- // Because we can be jumping here (to spilled code) from
- // unspilled code, we need to reestablish a spilled frame at
- // this block.
- frame_->SpillAll();
-
- // Reload sp from the top handler, because some statements that
-      // we break from (e.g., for...in) may have left stuff on the
- // stack.
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- frame_->Forget(frame_->height() - handler_height);
-
- // Unlink this handler and drop it from the frame.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ movq(kScratchRegister, handler_address);
- frame_->EmitPop(Operand(kScratchRegister, 0));
- frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
- if (i == kReturnShadowIndex) {
- // If this target shadowed the function return, materialize
- // the return value on the stack.
- frame_->EmitPush(rax);
- } else {
- // Fake TOS for targets that shadowed breaks and continues.
- frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- }
- __ Move(rcx, Smi::FromInt(JUMPING + i));
- if (--nof_unlinks > 0) {
- // If this is not the last unlink block, jump around the next.
- finally_block.Jump();
- }
- }
- }
-
- // --- Finally block ---
- finally_block.Bind();
-
- // Push the state on the stack.
- frame_->EmitPush(rcx);
-
- // We keep two elements on the stack - the (possibly faked) result
- // and the state - while evaluating the finally block.
- //
- // Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
-
- if (has_valid_frame()) {
- // Restore state and return value or faked TOS.
- frame_->EmitPop(rcx);
- frame_->EmitPop(rax);
- }
-
- // Generate code to jump to the right destination for all used
- // formerly shadowing targets. Deallocate each shadow target.
- for (int i = 0; i < shadows.length(); i++) {
- if (has_valid_frame() && shadows[i]->is_bound()) {
- BreakTarget* original = shadows[i]->other_target();
- __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
- if (i == kReturnShadowIndex) {
- // The return value is (already) in rax.
- Result return_value = allocator_->Allocate(rax);
- ASSERT(return_value.is_valid());
- if (function_return_is_shadowed_) {
- original->Branch(equal, &return_value);
- } else {
- // Branch around the preparation for return which may emit
- // code.
- JumpTarget skip;
- skip.Branch(not_equal);
- frame_->PrepareForReturn();
- original->Jump(&return_value);
- skip.Bind();
- }
- } else {
- original->Branch(equal);
- }
- }
- }
-
- if (has_valid_frame()) {
- // Check if we need to rethrow the exception.
- JumpTarget exit;
- __ SmiCompare(rcx, Smi::FromInt(THROWING));
- exit.Branch(not_equal);
-
- // Rethrow exception.
- frame_->EmitPush(rax); // undo pop from above
- frame_->CallRuntime(Runtime::kReThrow, 1);
-
- // Done.
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ DebuggerStatement");
- CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Spill everything, even constants, to the frame.
- frame_->SpillAll();
-
- frame_->DebugBreak();
- // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- // The inevitable call will sync frame elements to memory anyway, so
- // we do it eagerly to allow us to push the arguments directly into
- // place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
- function_info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
- frame_->Push(function_info);
- Result answer = frame_->CallStub(&stub, 1);
- frame_->Push(&answer);
- } else {
- // Call the runtime to instantiate the function based on the
- // shared function info.
- frame_->EmitPush(rsi);
- frame_->EmitPush(function_info);
- frame_->EmitPush(pretenure
- ? Factory::true_value()
- : Factory::false_value());
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
- frame_->Push(&result);
- }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function info and instantiate it.
- Handle<SharedFunctionInfo> function_info =
- Compiler::BuildFunctionInfo(node, script());
- // Check for stack-overflow exception.
- if (function_info.is_null()) {
- SetStackOverflow();
- return;
- }
- InstantiateFunction(function_info, node->pretenure());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- InstantiateFunction(node->shared_function_info(), false);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- Comment cmnt(masm_, "[ Conditional");
- JumpTarget then;
- JumpTarget else_;
- JumpTarget exit;
- ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), &dest, true);
-
- if (dest.false_was_fall_through()) {
- // The else target was bound, so we compile the else part first.
- Load(node->else_expression());
-
- if (then.is_linked()) {
- exit.Jump();
- then.Bind();
- Load(node->then_expression());
- }
- } else {
- // The then target was bound, so we compile the then part first.
- Load(node->then_expression());
-
- if (else_.is_linked()) {
- exit.Jump();
- else_.Bind();
- Load(node->else_expression());
- }
- }
-
- exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- JumpTarget slow;
- JumpTarget done;
- Result value;
-
- // Generate fast case for loading from slots that correspond to
- // local/global variables or arguments unless they are shadowed by
- // eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(slot,
- typeof_state,
- &value,
- &slow,
- &done);
-
- slow.Bind();
- // A runtime call is inevitable. We eagerly sync frame elements
- // to memory so that we can push the arguments directly into place
- // on top of the frame.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- if (typeof_state == INSIDE_TYPEOF) {
- value =
- frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- } else {
- value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- }
-
- done.Bind(&value);
- frame_->Push(&value);
-
- } else if (slot->var()->mode() == Variable::CONST) {
- // Const slots may contain 'the hole' value (the constant hasn't been
- // initialized yet) which needs to be converted into the 'undefined'
- // value.
- //
- // We currently spill the virtual frame because constants use the
- // potentially unsafe direct-frame access of SlotOperand.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Load const");
- JumpTarget exit;
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
- exit.Bind();
- frame_->EmitPush(rcx);
-
- } else if (slot->type() == Slot::PARAMETER) {
- frame_->PushParameterAt(slot->index());
-
- } else if (slot->type() == Slot::LOCAL) {
- frame_->PushLocalAt(slot->index());
-
- } else {
- // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
- // here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because it will always be a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
- frame_->Push(&temp);
- }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- LoadFromSlot(slot, state);
-
- // Bail out quickly if we're not using lazy arguments allocation.
- if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
- // ... or if the slot isn't a non-parameter arguments slot.
- if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
- // Pop the loaded value from the stack.
- Result value = frame_->Pop();
-
- // If the loaded value is a constant, we know if the arguments
- // object has been lazily loaded yet.
- if (value.is_constant()) {
- if (value.handle()->IsArgumentsMarker()) {
- Result arguments = StoreArgumentsObject(false);
- frame_->Push(&arguments);
- } else {
- frame_->Push(&value);
- }
- return;
- }
-
- // The loaded value is in a register. If it is the sentinel that
- // indicates that we haven't loaded the arguments object yet, we
- // need to do it now.
- JumpTarget exit;
- __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
- frame_->Push(&value);
- exit.Branch(not_equal);
- Result arguments = StoreArgumentsObject(false);
- frame_->SetElementAt(0, &arguments);
- exit.Bind();
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
- Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- // Check that no extension objects have been created by calls to
- // eval from the current scope to the global scope.
- Register context = rsi;
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid()); // All non-reserved registers were available.
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- slow->Branch(not_equal, not_taken);
- }
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- context = tmp.reg();
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(tmp.reg())) {
- __ movq(tmp.reg(), context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
- __ bind(&next);
- // Terminate at global context.
- __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
- __ j(equal, &fast);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
- slow->Branch(not_equal);
- // Load next context in chain.
- __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
- __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
- __ jmp(&next);
- __ bind(&fast);
- }
- tmp.Unuse();
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- LoadGlobal();
- frame_->Push(slot->var()->name());
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Result answer = frame_->CallLoadIC(mode);
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- masm_->nop();
- return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
- *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
- done->Jump(result);
-
- } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
- Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
- Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
- if (potential_slot != NULL) {
- // Generate fast case for locals that rewrite to slots.
- // Allocate a fresh register to use as a temp in
- // ContextSlotOperandCheckExtensions and to hold the result
- // value.
- *result = allocator_->Allocate();
- ASSERT(result->is_valid());
- __ movq(result->reg(),
- ContextSlotOperandCheckExtensions(potential_slot,
- *result,
- slow));
- if (potential_slot->var()->mode() == Variable::CONST) {
- __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
- done->Branch(not_equal, result);
- __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
- }
- done->Jump(result);
- } else if (rewrite != NULL) {
- // Generate fast case for argument loads.
- Property* property = rewrite->AsProperty();
- if (property != NULL) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- Literal* key_literal = property->key()->AsLiteral();
- if (obj_proxy != NULL &&
- key_literal != NULL &&
- obj_proxy->IsArguments() &&
- key_literal->handle()->IsSmi()) {
- // Load arguments object if there are no eval-introduced
- // variables. Then load the argument from the arguments
- // object using keyed load.
- Result arguments = allocator()->Allocate();
- ASSERT(arguments.is_valid());
- __ movq(arguments.reg(),
- ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
- arguments,
- slow));
- frame_->Push(&arguments);
- frame_->Push(key_literal->handle());
- *result = EmitKeyedLoad();
- done->Jump(result);
- }
- }
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call. Since the call is inevitable,
- // we eagerly sync the virtual frame so we can directly push the
- // arguments into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- frame_->EmitPush(rsi);
- frame_->EmitPush(slot->var()->name());
-
- Result value;
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling chained assignment
- // expressions.
- frame_->Push(&value);
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- //
- // We spill the frame in the code below because the direct-frame
- // access of SlotOperand is potentially unsafe with an unspilled
- // frame.
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Init const");
- __ movq(rcx, SlotOperand(slot, rcx));
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- exit.Branch(not_equal);
- }
-
- // We must execute the store. Storing a variable must keep the (new)
- // value on the stack. This is necessary for compiling assignment
- // expressions.
- //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because const declarations initialize consts to
-    // 'the hole' value and, in doing so, end up calling this code.
- if (slot->type() == Slot::PARAMETER) {
- frame_->StoreToParameterAt(slot->index());
- } else if (slot->type() == Slot::LOCAL) {
- frame_->StoreToLocalAt(slot->index());
- } else {
- // The other slot types (LOOKUP and GLOBAL) cannot reach here.
- //
- // The use of SlotOperand below is safe for an unspilled frame
- // because the slot is a context slot.
- ASSERT(slot->type() == Slot::CONTEXT);
- frame_->Dup();
- Result value = frame_->Pop();
- value.ToRegister();
- Result start = allocator_->Allocate();
- ASSERT(start.is_valid());
- __ movq(SlotOperand(slot, start.reg()), value.reg());
- // RecordWrite may destroy the value registers.
- //
- // TODO(204): Avoid actually spilling when the value is not
- // needed (probably the common case).
- frame_->Spill(value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
- // The results start, value, and temp are unused by going out of
- // scope.
- }
-
- exit.Bind();
- }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValue();
- }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- Comment cmnt(masm_, "[ Literal");
- frame_->Push(node->handle());
-}
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
- UNIMPLEMENTED();
- // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
- return false;
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function. Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
- DeferredRegExpLiteral(Register boilerplate,
- Register literals,
- RegExpLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredRegExpLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // RegExp pattern (2).
- __ Push(node_->pattern());
- // RegExp flags (3).
- __ Push(node_->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
- DeferredAllocateInNewSpace(int size,
- Register target,
- int registers_to_save = 0)
- : size_(size), target_(target), registers_to_save_(registers_to_save) {
- ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
- set_comment("[ DeferredAllocateInNewSpace");
- }
- void Generate();
-
- private:
- int size_;
- Register target_;
- int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
- for (int i = 0; i < kNumRegs; i++) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ push(save_register);
- }
- }
- __ Push(Smi::FromInt(size_));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- if (!target_.is(rax)) {
- __ movq(target_, rax);
- }
- for (int i = kNumRegs - 1; i >= 0; i--) {
- if (registers_to_save_ & (1 << i)) {
- Register save_register = { i };
- __ pop(save_register);
- }
- }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- Comment cmnt(masm_, "[ RegExp Literal");
-
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the RegExp object. If so,
- // jump to the deferred code passing the literals array.
- DeferredRegExpLiteral* deferred =
- new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
-
-  // The boilerplate register now contains the RegExp boilerplate object.
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
- DeferredAllocateInNewSpace* allocate_fallback =
- new DeferredAllocateInNewSpace(size, literals.reg());
- frame_->Push(&boilerplate);
- frame_->SpillTop();
- __ AllocateInNewSpace(size,
- literals.reg(),
- tmp.reg(),
- no_reg,
- allocate_fallback->entry_label(),
- TAG_OBJECT);
- allocate_fallback->BindExit();
- boilerplate = frame_->Pop();
- // Copy from boilerplate to clone and return clone.
-
- for (int i = 0; i < size; i += kPointerSize) {
- __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
- __ movq(FieldOperand(literals.reg(), i), tmp.reg());
- }
- frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- Comment cmnt(masm_, "[ ObjectLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Literal array.
- frame_->Push(&literals);
- // Literal index.
- frame_->Push(Smi::FromInt(node->literal_index()));
- // Constant properties.
- frame_->Push(node->constant_properties());
- // Should the object literal have fast elements?
- frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
- Result clone;
- if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- }
- frame_->Push(&clone);
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- node->CalculateEmitStore();
-
- for (int i = 0; i < node->properties()->length(); i++) {
- ObjectLiteral::Property* property = node->properties()->at(i);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- break;
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
- // else fall through.
- case ObjectLiteral::Property::COMPUTED: {
- Handle<Object> key(property->key()->handle());
- if (key->IsSymbol()) {
- // Duplicate the object as the IC receiver.
- frame_->Dup();
- Load(property->value());
- if (property->emit_store()) {
- Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false,
- strict_mode_flag());
- // A test rax instruction following the store IC call would
- // indicate the presence of an inlined version of the
- // store. Add a nop to indicate that there is no such
- // inlined version.
- __ nop();
- } else {
- frame_->Drop(2);
- }
- break;
- }
- // Fall through
- }
- case ObjectLiteral::Property::PROTOTYPE: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- Load(property->value());
- if (property->emit_store()) {
- frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
- // Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
- } else {
- frame_->Drop(3);
- }
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(1));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- case ObjectLiteral::Property::GETTER: {
- // Duplicate the object as an argument to the runtime call.
- frame_->Dup();
- Load(property->key());
- frame_->Push(Smi::FromInt(0));
- Load(property->value());
- Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
- // Ignore the result.
- break;
- }
- default: UNREACHABLE();
- }
- }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- // Load a writable copy of the function of this activation in a
- // register.
- frame_->PushFunction();
- Result literals = frame_->Pop();
- literals.ToRegister();
- frame_->Spill(literals.reg());
-
- // Load the literals array of the function.
- __ movq(literals.reg(),
- FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- frame_->Push(&literals);
- frame_->Push(Smi::FromInt(node->literal_index()));
- frame_->Push(node->constant_elements());
- int length = node->values()->length();
- Result clone;
- if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
- } else if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
- clone = frame_->CallStub(&stub, 3);
- }
- frame_->Push(&clone);
-
- // Generate code to set the elements in the array that are not
- // literals.
- for (int i = 0; i < length; i++) {
- Expression* value = node->values()->at(i);
-
- if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
- continue;
- }
-
- // The property must be set by generated code.
- Load(value);
-
- // Get the property value off the stack.
- Result prop_value = frame_->Pop();
- prop_value.ToRegister();
-
- // Fetch the array literal while leaving a copy on the stack and
- // use it to get the elements array.
- frame_->Dup();
- Result elements = frame_->Pop();
- elements.ToRegister();
- frame_->Spill(elements.reg());
- // Get the elements FixedArray.
- __ movq(elements.reg(),
- FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
- // Write to the indexed properties array.
- int offset = i * kPointerSize + FixedArray::kHeaderSize;
- __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
- // Update the write barrier for the array address.
- frame_->Spill(prop_value.reg()); // Overwritten by the write barrier.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
- }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- ASSERT(!in_spilled_code());
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- Load(node->key());
- Load(node->value());
- Result result =
- frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Variable Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL);
- Slot* slot = var->AsSlot();
- ASSERT(slot != NULL);
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Perform the assignment.
- if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
- CodeForSourcePosition(node->position());
- StoreToSlot(slot,
- node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
- }
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm(), "[ Named Property Assignment");
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
- ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
- // Initialize name and evaluate the receiver sub-expression if necessary. If
- // the receiver is trivial it is not placed on the stack at this point, but
- // loaded whenever actually needed.
- Handle<String> name;
- bool is_trivial_receiver = false;
- if (var != NULL) {
- name = var->name();
- } else {
- Literal* lit = prop->key()->AsLiteral();
- ASSERT_NOT_NULL(lit);
- name = Handle<String>::cast(lit->handle());
- // Do not materialize the receiver on the frame if it is trivial.
- is_trivial_receiver = prop->obj()->IsTrivial();
- if (!is_trivial_receiver) Load(prop->obj());
- }
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form expr.x = ...,
-    // so this will never be an assignment to a variable and there must be a
-    // receiver object.
- ASSERT_EQ(NULL, var);
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- frame()->Dup();
- }
- Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block() && !is_trivial_receiver) {
- frame()->Dup();
- }
-
- // Stack layout:
- // [tos] : receiver (only materialized if non-trivial)
- // [tos+1] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else if (var != NULL) {
- // The LoadIC stub expects the object in rax.
- // Freeing rax causes the code generator to load the global into it.
- frame_->Spill(rax);
- LoadGlobal();
- } else {
- frame()->Dup();
- }
- Result value = EmitNamedLoad(name, var != NULL);
- frame()->Push(&value);
- Load(node->value());
-
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- // Construct the implicit binary operation.
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : receiver (only materialized if non-trivial)
- // [tos+2] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(var == NULL || var->mode() != Variable::CONST);
- ASSERT_NE(Token::INIT_CONST, node->op());
- if (is_trivial_receiver) {
- Result value = frame()->Pop();
- frame()->Push(prop->obj());
- frame()->Push(&value);
- }
- CodeForSourcePosition(node->position());
- bool is_contextual = (var != NULL);
- Result answer = EmitNamedStore(name, is_contextual);
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- if (node->ends_initialization_block()) {
- ASSERT_EQ(NULL, var);
- // The argument to the runtime call is the receiver.
- if (is_trivial_receiver) {
- frame()->Push(prop->obj());
- } else {
- // A copy of the receiver is below the value of the assignment. Swap
- // the receiver and the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- }
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Comment cmnt(masm_, "[ Keyed Property Assignment");
- Property* prop = node->target()->AsProperty();
- ASSERT_NOT_NULL(prop);
-
- // Evaluate the receiver subexpression.
- Load(prop->obj());
-
- // Change to slow case in the beginning of an initialization block to
- // avoid the quadratic behavior of repeatedly adding fast properties.
- if (node->starts_initialization_block()) {
- frame_->Dup();
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- // Change to fast case at the end of an initialization block. To prepare for
- // that add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- if (node->ends_initialization_block()) {
- frame_->Dup();
- }
-
- // Evaluate the key subexpression.
- Load(prop->key());
-
- // Stack layout:
- // [tos] : key
- // [tos+1] : receiver
- // [tos+2] : receiver if at the end of an initialization block
-
- // Evaluate the right-hand side.
- if (node->is_compound()) {
- // For a compound assignment the right-hand side is a binary operation
- // between the current property value and the actual right-hand side.
- // Duplicate receiver and key for loading the current property value.
- frame()->PushElementAt(1);
- frame()->PushElementAt(1);
- Result value = EmitKeyedLoad();
- frame()->Push(&value);
- Load(node->value());
-
- // Perform the binary operation.
- bool overwrite_value = node->value()->ResultOverwriteAllowed();
- BinaryOperation expr(node);
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- } else {
- // For non-compound assignment just load the right-hand side.
- Load(node->value());
- }
-
- // Stack layout:
- // [tos] : value
- // [tos+1] : key
- // [tos+2] : receiver
- // [tos+3] : receiver if at the end of an initialization block
-
- // Perform the assignment. It is safe to ignore constants here.
- ASSERT(node->op() != Token::INIT_CONST);
- CodeForSourcePosition(node->position());
- Result answer = EmitKeyedStore(prop->key()->type());
- frame()->Push(&answer);
-
- // Stack layout:
- // [tos] : result
- // [tos+1] : receiver if at the end of an initialization block
-
- // Change to fast case at the end of an initialization block.
- if (node->ends_initialization_block()) {
- // The argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment. Swap the receiver and
- // the value of the assignment expression.
- Result result = frame()->Pop();
- Result receiver = frame()->Pop();
- frame()->Push(&result);
- frame()->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- // Stack layout:
- // [tos] : result
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- Property* prop = node->target()->AsProperty();
-
- if (var != NULL && !var->is_global()) {
- EmitSlotAssignment(node);
-
- } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
- (var != NULL && var->is_global())) {
- // Properties whose keys are property names and global variables are
- // treated as named property references. We do not need to consider
- // global 'this' because it is not a valid left-hand side.
- EmitNamedPropertyAssignment(node);
-
- } else if (prop != NULL) {
- // Other properties (including rewritten parameters for a function that
- // uses arguments) are keyed property assignments.
- EmitKeyedPropertyAssignment(node);
-
- } else {
- // Invalid left-hand side.
- Load(node->target());
- Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
- // The runtime call doesn't actually return but the code generator will
- // still generate code and expects a certain frame height.
- frame()->Push(&result);
- }
-
- ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- Comment cmnt(masm_, "[ Throw");
- Load(node->exception());
- Result result = frame_->CallRuntime(Runtime::kThrow, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- Comment cmnt(masm_, "[ Property");
- Reference property(this, node);
- property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- Comment cmnt(masm_, "[ Call");
-
- ZoneList<Expression*>* args = node->arguments();
-
- // Check if the function is a variable or a property.
- Expression* function = node->expression();
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- // ----------------------------------
- // JavaScript example: 'eval(arg)' // eval is not known to be shadowed
- // ----------------------------------
-
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
-
- // Prepare the stack for the call to the resolved function.
- Load(function);
-
- // Allocate a frame slot for the receiver.
- frame_->Push(Factory::undefined_value());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Result to hold the result of the function resolution and the
- // final result of the eval call.
- Result result;
-
- // If we know that eval can only be shadowed by eval-introduced
- // variables we attempt to load the global eval function directly
- // in generated code. If we succeed, there is no need to perform a
- // context lookup in the runtime system.
- JumpTarget done;
- if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
- ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
- JumpTarget slow;
- // Prepare the stack for the call to
- // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
- // function, the first argument to the eval call and the
- // receiver.
- Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &slow);
- frame_->Push(&fun);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
- done.Jump(&result);
- slow.Bind();
- }
-
- // Prepare the stack for the call to ResolvePossiblyDirectEval by
- // pushing the loaded function, the first argument to the eval
- // call and the receiver.
- frame_->PushElementAt(arg_count + 1);
- if (arg_count > 0) {
- frame_->PushElementAt(arg_count);
- } else {
- frame_->Push(Factory::undefined_value());
- }
- frame_->PushParameterAt(-1);
-
- // Push the strict mode flag.
- frame_->Push(Smi::FromInt(strict_mode_flag()));
-
- // Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
- // If we generated fast-case code bind the jump-target where fast
- // and slow case merge.
- if (done.is_linked()) done.Bind(&result);
-
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- Result receiver = allocator_->Allocate(rdx);
- frame_->SetElementAt(arg_count + 1, &result);
- frame_->SetElementAt(arg_count, &receiver);
- receiver.Unuse();
-
- // Call the function.
- CodeForSourcePosition(node->position());
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
- result = frame_->CallStub(&call_function, arg_count + 1);
-
- // Restore the context and overwrite the function on the stack with
- // the result.
- frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &result);
-
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function on the frame.
- frame_->Push(var->name());
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->Push(&result);
-
- } else if (var != NULL && var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::LOOKUP) {
- // ----------------------------------
- // JavaScript examples:
- //
- // with (obj) foo(1, 2, 3) // foo may be in obj.
- //
- // function f() {};
- // function g() {
- // eval(...);
- // f(); // f could be in extension object.
- // }
- // ----------------------------------
-
- JumpTarget slow, done;
- Result function;
-
- // Generate fast case for loading functions from slots that
- // correspond to local/global variables or arguments unless they
- // are shadowed by eval-introduced bindings.
- EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
- NOT_INSIDE_TYPEOF,
- &function,
- &slow,
- &done);
-
- slow.Bind();
- // Load the function from the context. Sync the frame so we can
- // push the arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(var->name());
- frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
- // The runtime call returns a pair of values in rax and rdx. The
- // looked-up function is in rax and the receiver is in rdx. These
- // register references are not ref counted here. We spill them
- // eagerly since they are arguments to an inevitable call (and are
- // not sharable by the arguments).
- ASSERT(!allocator()->is_used(rax));
- frame_->EmitPush(rax);
-
- // Load the receiver.
- ASSERT(!allocator()->is_used(rdx));
- frame_->EmitPush(rdx);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- JumpTarget call;
- call.Jump();
- done.Bind(&function);
- frame_->Push(&function);
- LoadGlobalReceiver();
- call.Bind();
- }
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
- } else if (property != NULL) {
- // Check if the key is a literal string.
- Literal* literal = property->key()->AsLiteral();
-
- if (literal != NULL && literal->handle()->IsSymbol()) {
- // ------------------------------------------------------------------
- // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
- // ------------------------------------------------------------------
-
- Handle<String> name = Handle<String>::cast(literal->handle());
-
- if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
- name->IsEqualTo(CStrVector("apply")) &&
- args->length() == 2 &&
- args->at(1)->AsVariableProxy() != NULL &&
- args->at(1)->AsVariableProxy()->IsArguments()) {
- // Use the optimized Function.prototype.apply that avoids
- // allocating lazily allocated arguments objects.
- CallApplyLazy(property->obj(),
- args->at(0),
- args->at(1)->AsVariableProxy(),
- node->position());
-
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Push the name of the function onto the frame.
- frame_->Push(name);
-
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
-
- } else {
- // -------------------------------------------
- // JavaScript example: 'array[index](1, 2, 3)'
- // -------------------------------------------
-
- // Load the function to call from the property through a reference.
- if (property->is_synthetic()) {
- Reference ref(this, property, false);
- ref.GetValue();
- // Use global object as receiver.
- LoadGlobalReceiver();
- // Call the function.
- CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
- } else {
- // Push the receiver onto the frame.
- Load(property->obj());
-
- // Load the name of the function.
- Load(property->key());
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&key);
- frame_->Push(&receiver);
- key.Unuse();
- receiver.Unuse();
-
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- frame_->SpillTop();
- }
-
- // Place the key on top of stack and call the IC initialization code.
- frame_->PushElementAt(arg_count + 1);
- CodeForSourcePosition(node->position());
- Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting());
- frame_->Drop(); // Drop the key still on the stack.
- frame_->RestoreContextRegister();
- frame_->Push(&result);
- }
- }
- } else {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is not global
- // ----------------------------------
-
- // Load the function.
- Load(function);
-
- // Pass the global proxy as the receiver.
- LoadGlobalReceiver();
-
- // Call the function.
- CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
- }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- Comment cmnt(masm_, "[ CallNew");
-
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments. This is different from ordinary calls, where the
- // actual function to call is resolved after the arguments have been
- // evaluated.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- Load(node->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = node->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- CodeForSourcePosition(node->position());
- Result result = frame_->CallConstructor(arg_count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- value.Unuse();
- destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (ShouldGenerateLog(args->at(0))) {
- Load(args->at(1));
- Load(args->at(2));
- frame_->CallRuntime(Runtime::kLog, 2);
- }
-#endif
- // Finally, we're expected to leave a value on the top of the stack.
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
- value.Unuse();
- destination()->Split(non_negative_smi);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result_, Heap::kNanValueRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharCodeAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need two extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredStringCharCodeAt* deferred =
- new DeferredStringCharCodeAt(object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharFromCode");
- ASSERT(args->length() == 1);
-
- Load(args->at(0));
-
- Result code = frame_->Pop();
- code.ToRegister();
- ASSERT(code.is_valid());
-
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
- code.reg(), result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_at_generator_.GenerateSlow(masm(), call_helper);
-
- __ bind(&need_conversion_);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result_, Smi::FromInt(0));
- __ jmp(exit_label());
-
- __ bind(&index_out_of_range_);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
- __ jmp(exit_label());
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateStringCharAt");
- ASSERT(args->length() == 2);
-
- Load(args->at(0));
- Load(args->at(1));
- Result index = frame_->Pop();
- Result object = frame_->Pop();
- object.ToRegister();
- index.ToRegister();
- // We might mutate the object register.
- frame_->Spill(object.reg());
-
- // We need three extra registers.
- Result result = allocator()->Allocate();
- ASSERT(result.is_valid());
- Result scratch1 = allocator()->Allocate();
- ASSERT(scratch1.is_valid());
- Result scratch2 = allocator()->Allocate();
- ASSERT(scratch2.is_valid());
-
- DeferredStringCharAt* deferred =
- new DeferredStringCharAt(object.reg(),
- index.reg(),
- scratch1.reg(),
- scratch2.reg(),
- result.reg());
- deferred->fast_case_generator()->GenerateFast(masm_);
- deferred->BindExit();
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a JS array or not.
- __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // It is a heap object - get map.
- // Check if the object is a regexp.
- __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
-
- __ Move(kScratchRegister, Factory::null_value());
- __ cmpq(obj.reg(), kScratchRegister);
- destination()->true_target()->Branch(equal);
-
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
- destination()->false_target()->Branch(below);
- __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
- obj.Unuse();
- destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-//    typeof(arg) == 'function').
- // It includes undetectable objects (as opposed to IsObject).
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- Condition is_smi = masm_->CheckSmi(value.reg());
- destination()->false_target()->Branch(is_smi);
- // Check that this is an object.
- __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
- value.Unuse();
- destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether a String wrapper object is safe to use with
-// the default valueOf. This code is called after the bit caching this
-// information has been checked in the object's map, which is expected in the
-// map_result_ register. On return map_result_ contains 1 for true and 0 for
-// false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- Label false_result;
-
- // Check that map is loaded as expected.
- if (FLAG_debug_code) {
- __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ Assert(equal, "Map not in expected register");
- }
-
- // Check for fast case object. Generate false result for slow case object.
- __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
- __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
- __ j(equal, &false_result);
-
-    // Look for the valueOf symbol in the descriptor array and indicate false
-    // if it is found. The descriptor type is not checked, so a transition
-    // named valueOf produces a false negative.
- __ movq(map_result_,
- FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
- __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
- // map_result_: descriptor array
- // scratch1_: length of descriptor array
- // Calculate the end of the descriptor array.
- SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
- __ lea(scratch1_,
- Operand(
- map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
- // Calculate location of the first key name.
- __ addq(map_result_,
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
- // Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- Label entry, loop;
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(scratch2_, FieldOperand(map_result_, 0));
- __ Cmp(scratch2_, Factory::value_of_symbol());
- __ j(equal, &false_result);
- __ addq(map_result_, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(map_result_, scratch1_);
- __ j(not_equal, &loop);
-
- // Reload map as register map_result_ was used as temporary above.
- __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object, check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
- __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
- __ testq(scratch1_, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
- __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
- __ movq(scratch2_,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(scratch2_,
- FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
- __ cmpq(scratch1_,
- ContextOperand(
- scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, &false_result);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ Set(map_result_, 1);
- __ jmp(exit_label());
- __ bind(&false_result);
- // Set false result.
- __ Set(map_result_, 0);
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop(); // Pop the string wrapper.
- obj.ToRegister();
- ASSERT(obj.is_valid());
- if (FLAG_debug_code) {
- __ AbortIfSmi(obj.reg());
- }
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- Result map_result = allocator()->Allocate();
- ASSERT(map_result.is_valid());
- __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- destination()->true_target()->Branch(not_zero);
-
- // We need an additional two scratch registers for the deferred code.
- Result temp1 = allocator()->Allocate();
- ASSERT(temp1.is_valid());
- Result temp2 = allocator()->Allocate();
- ASSERT(temp2.is_valid());
-
- DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
- new DeferredIsStringWrapperSafeForDefaultValueOf(
- obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
- deferred->Branch(zero);
- deferred->BindExit();
- __ testq(map_result.reg(), map_result.reg());
- obj.Unuse();
- map_result.Unuse();
- temp1.Unuse();
- temp2.Unuse();
- destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- // This generates a fast version of:
- // (%_ClassOf(arg) === 'Function')
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- obj.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result obj = frame_->Pop();
- obj.ToRegister();
- Condition is_smi = masm_->CheckSmi(obj.reg());
- destination()->false_target()->Branch(is_smi);
- __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
- obj.Unuse();
- destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- // Get the frame pointer for the calling frame.
- Result fp = allocator()->Allocate();
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- fp.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-
- Result fp = allocator_->Allocate();
- Result result = allocator_->Allocate();
- ASSERT(fp.is_valid() && result.is_valid());
-
- Label exit;
-
- // Get the number of formal parameters.
- __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(result.reg(),
- Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- result.set_type_info(TypeInfo::Smi());
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(result.reg());
- }
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
- Load(args->at(0)); // Load the object.
- Result obj = frame_->Pop();
- obj.ToRegister();
- frame_->Spill(obj.reg());
-
- // If the object is a smi, we return null.
- Condition is_smi = masm_->CheckSmi(obj.reg());
- null.Branch(is_smi);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
-
- __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(below);
-
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
- function.Branch(equal);
-
- // Check if the constructor in the map is a function.
- __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
- __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
- non_function_constructor.Branch(not_equal);
-
- // The obj register now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(obj.reg(),
- FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
- __ movq(obj.reg(),
- FieldOperand(obj.reg(),
- SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->Push(&obj);
- leave.Jump();
-
- // Functions have class 'Function'.
- function.Bind();
- frame_->Push(Factory::function_class_symbol());
- leave.Jump();
-
- // Objects with a non-function constructor have class 'Object'.
- non_function_constructor.Bind();
- frame_->Push(Factory::Object_symbol());
- leave.Jump();
-
- // Non-JS objects have class null.
- null.Bind();
- frame_->Push(Factory::null_value());
-
- // All done.
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- frame_->Dup();
- Result object = frame_->Pop();
- object.ToRegister();
- ASSERT(object.is_valid());
- // if (object->IsSmi()) return object.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi);
- // It is a heap object - get map.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- // if (!object->IsJSValue()) return object.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
- leave.Branch(not_equal);
- __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
- object.Unuse();
- frame_->SetElementAt(0, &temp);
- leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- JumpTarget leave;
- Load(args->at(0)); // Load the object.
- Load(args->at(1)); // Load the value.
- Result value = frame_->Pop();
- Result object = frame_->Pop();
- value.ToRegister();
- object.ToRegister();
-
- // if (object->IsSmi()) return value.
- Condition is_smi = masm_->CheckSmi(object.reg());
- leave.Branch(is_smi, &value);
-
- // It is a heap object - get its map.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- // if (!object->IsJSValue()) return value.
- __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
- leave.Branch(not_equal, &value);
-
- // Store the value.
- __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- Result duplicate_value = allocator_->Allocate();
- ASSERT(duplicate_value.is_valid());
- __ movq(duplicate_value.reg(), value.reg());
- // The object register is also overwritten by the write barrier and
- // possibly aliased in the frame.
- frame_->Spill(object.reg());
- __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
- scratch.reg());
- object.Unuse();
- scratch.Unuse();
- duplicate_value.Unuse();
-
- // Leave.
- leave.Bind(&value);
- frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- Load(args->at(0));
- Result key = frame_->Pop();
- // Explicitly create a constant result.
- Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
- // Call the shared stub to get to arguments[key].
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- Result result = frame_->CallStub(&stub, &key, &count);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- Load(args->at(0));
- Load(args->at(1));
- Result right = frame_->Pop();
- Result left = frame_->Pop();
- right.ToRegister();
- left.ToRegister();
- __ cmpq(right.reg(), left.reg());
- right.Unuse();
- left.Unuse();
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
-  // The rbp value is aligned, so its tag bit is zero and it passes a smi
-  // check. It is not necessarily padded like a real smi, though, so it must
-  // not be treated as one.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- Result rbp_as_smi = allocator_->Allocate();
- ASSERT(rbp_as_smi.is_valid());
- __ movq(rbp_as_smi.reg(), rbp);
- frame_->Push(&rbp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- ASSERT(args->length() == 0);
- frame_->SpillAll();
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- Result result = allocator_->Allocate(rax);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- SubStringStub stub;
- Result answer = frame_->CallStub(&stub, 3);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
-
- StringCompareStub stub;
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 4);
-
- // Load the arguments on the stack and call the runtime system.
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
- Load(args->at(3));
- RegExpExecStub stub;
- Result result = frame_->CallStub(&stub, 4);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- ASSERT_EQ(3, args->length());
- Load(args->at(0)); // Size of array, smi.
- Load(args->at(1)); // "index" property value.
- Load(args->at(2)); // "input" property value.
- RegExpConstructResultStub stub;
- Result result = frame_->CallStub(&stub, 3);
- frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst,
- Register cache,
- Register key,
- Register scratch)
- : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
-  Register dst_;      // On invocation: holds the finger index (as int32).
-                      // On exit: holds the value that was looked up.
- Register cache_; // instance of JSFunctionResultCache.
- Register key_; // key being looked up.
- Register scratch_;
-};
-
-
-// Returns the operand for the element at |index| + |additional_offset|
-// in the FixedArray whose pointer is held in |array|. |index| is an int32.
-static Operand ArrayElement(Register array,
- Register index,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index, times_pointer_size, offset);
-}
-
-
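-// The cache (a JSFunctionResultCache, which is a FixedArray) holds the
-// factory function, a finger pointing at the most recently hit entry, the
-// current cache size, and then (key, value) pairs. The deferred code below
-// scans the keys from the finger back to the first entry, then from the end
-// of the used part of the cache back down to the finger; on a hit the finger
-// is updated and the cached value is loaded, on a miss the factory is called
-// and the new pair is stored.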
-void DeferredSearchCache::Generate() {
- Label first_loop, search_further, second_loop, cache_miss;
-
- Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
- Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
- // Check the cache from finger to start of the cache.
- __ bind(&first_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, kEntriesIndexImm);
- __ j(less, &search_further);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &first_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&search_further);
-
- // Check the cache from end of cache up to finger.
- __ SmiToInteger32(dst_,
- FieldOperand(cache_,
- JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(scratch_,
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
- __ bind(&second_loop);
- __ subl(dst_, kEntrySizeImm);
- __ cmpl(dst_, scratch_);
- __ j(less_equal, &cache_miss);
-
- __ cmpq(ArrayElement(cache_, dst_), key_);
- __ j(not_equal, &second_loop);
-
- __ Integer32ToSmiField(
- FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
- __ movq(dst_, ArrayElement(cache_, dst_, 1));
- __ jmp(exit_label());
-
- __ bind(&cache_miss);
- __ push(cache_); // store a reference to cache
- __ push(key_); // store a key
- __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ push(key_);
- // On x64 function must be in rdi.
- __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
- ParameterCount expected(1);
- __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
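-  // The factory call leaves the new value in rax; the cache and the key
-  // saved before the call are still on the stack (key on top).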
- // Find a place to put new cached value into.
- Label add_new_entry, update_cache;
- __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache
-  // Possible optimization: the cache size is constant for a given cache, so
-  // technically we could use a constant here. However, on a cache miss this
-  // optimization would hardly matter.
-
- // Check if we could add new entry to cache.
- __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ SmiToInteger32(r9,
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ cmpl(rbx, r9);
- __ j(greater, &add_new_entry);
-
- // Check if we could evict entry after finger.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ addl(rdx, kEntrySizeImm);
- Label forward;
- __ cmpl(rbx, rdx);
- __ j(greater, &forward);
- // Need to wrap over the cache.
- __ movl(rdx, kEntriesIndexImm);
- __ bind(&forward);
- __ movl(r9, rdx);
- __ jmp(&update_cache);
-
- __ bind(&add_new_entry);
- // r9 holds cache size as int32.
- __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
- // Update the cache itself.
- // r9 holds the index as int32.
- __ bind(&update_cache);
- __ pop(rbx); // restore the key
- __ Integer32ToSmiField(
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
- // Store key.
- __ movq(ArrayElement(rcx, r9), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
-
- // Store value.
- __ pop(rcx); // restore the cache.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ incl(rdx);
-  // Back up rax, because the RecordWrite macro clobbers its arguments.
- __ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rax);
- __ RecordWrite(rcx, 0, rbx, rdx);
-
- if (!dst_.is(rax)) {
- __ movq(dst_, rax);
- }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- frame_->Push(Factory::undefined_value());
- return;
- }
-
- Load(args->at(1));
- Result key = frame_->Pop();
- key.ToRegister();
-
- Result cache = allocator()->Allocate();
- ASSERT(cache.is_valid());
- __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
- __ movq(cache.reg(),
- ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache.reg(),
- FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
- Result tmp = allocator()->Allocate();
- ASSERT(tmp.is_valid());
-
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
- cache.reg(),
- key.reg(),
- scratch.reg());
-
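-  // Fast path: compare the key against the entry the finger currently points
-  // at. On a hit the cached value (the slot after the key) is loaded
-  // directly; otherwise the deferred code performs the full cache search.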
- const int kFingerOffset =
- FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  // Load the cache finger (an element index stored as a smi) into
-  // tmp.reg() as an int32.
- __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- __ cmpq(key.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize));
- deferred->Branch(not_equal);
- __ movq(tmp.reg(), FieldOperand(cache.reg(),
- tmp.reg(), times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
-
- deferred->BindExit();
- frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and jump to the runtime.
- Load(args->at(0));
-
- NumberToStringStub stub;
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- __ push(object_);
- __ push(index1_);
- __ push(index2_);
- __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateSwapElements");
-
- ASSERT_EQ(3, args->length());
-
- Load(args->at(0));
- Load(args->at(1));
- Load(args->at(2));
-
- Result index2 = frame_->Pop();
- index2.ToRegister();
-
- Result index1 = frame_->Pop();
- index1.ToRegister();
-
- Result object = frame_->Pop();
- object.ToRegister();
-
- Result tmp1 = allocator()->Allocate();
- tmp1.ToRegister();
- Result tmp2 = allocator()->Allocate();
- tmp2.ToRegister();
-
- frame_->Spill(object.reg());
- frame_->Spill(index1.reg());
- frame_->Spill(index2.reg());
-
- DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
- index1.reg(),
- index2.reg());
-
- // Fetch the map and check if array is in fast case.
- // Check that object doesn't require security checks and
- // has no indexed interceptor.
- __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
- deferred->Branch(below);
- __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(not_zero);
-
- // Check the object's elements are in fast case and writable.
- __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- deferred->Branch(not_equal);
-
- // Check that both indices are smis.
- Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
- deferred->Branch(NegateCondition(both_smi));
-
- // Check that both indices are valid.
- __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
- __ SmiCompare(tmp2.reg(), index1.reg());
- deferred->Branch(below_equal);
- __ SmiCompare(tmp2.reg(), index2.reg());
- deferred->Branch(below_equal);
-
- // Bring addresses into index1 and index2.
- __ SmiToInteger32(index1.reg(), index1.reg());
- __ lea(index1.reg(), FieldOperand(tmp1.reg(),
- index1.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(index2.reg(), index2.reg());
- __ lea(index2.reg(), FieldOperand(tmp1.reg(),
- index2.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Swap elements.
- __ movq(object.reg(), Operand(index1.reg(), 0));
- __ movq(tmp2.reg(), Operand(index2.reg(), 0));
- __ movq(Operand(index2.reg(), 0), object.reg());
- __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
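-  // If the elements array is in new space no write barrier is needed;
-  // otherwise record both stores to keep the remembered set up to date.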
- Label done;
- __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: check that both values are smis by or-ing them
-  // together and testing the result against the smi tag mask.
-
- __ movq(tmp2.reg(), tmp1.reg());
- __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
- __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
- __ bind(&done);
-
- deferred->BindExit();
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- Comment cmnt(masm_, "[ GenerateCallFunction");
-
- ASSERT(args->length() >= 2);
-
- int n_args = args->length() - 2; // for receiver and function.
- Load(args->at(0)); // receiver
- for (int i = 0; i < n_args; i++) {
- Load(args->at(i + 1));
- }
- Load(args->at(n_args + 1)); // function
- Result result = frame_->CallJSFunction(n_args);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 2);
- Load(args->at(0));
- Load(args->at(1));
-
- Label allocate_return;
- // Load the two operands while leaving the values on the frame.
- frame()->Dup();
- Result exponent = frame()->Pop();
- exponent.ToRegister();
- frame()->Spill(exponent.reg());
- frame()->PushElementAt(1);
- Result base = frame()->Pop();
- base.ToRegister();
- frame()->Spill(base.reg());
-
- Result answer = allocator()->Allocate();
- ASSERT(answer.is_valid());
- ASSERT(!exponent.reg().is(base.reg()));
- JumpTarget call_runtime;
-
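-  // Only special cases are handled inline: a smi exponent is computed by
-  // binary exponentiation, and a heap number exponent equal to 0.5 or -0.5
-  // is computed with sqrtsd; everything else branches to the runtime.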
- // Save 1 in xmm3 - we need this several times later on.
- __ movl(answer.reg(), Immediate(1));
- __ cvtlsi2sd(xmm3, answer.reg());
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
- __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
- // Optimized version when y is an integer.
- Label powi;
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
-
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // Optimized version of pow if y is an integer.
- __ bind(&powi);
- __ SmiToInteger32(exponent.reg(), exponent.reg());
-
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movl(base.reg(), exponent.reg());
-
- // Get absolute value of exponent.
- Label no_neg;
- __ cmpl(exponent.reg(), Immediate(0));
- __ j(greater_equal, &no_neg);
- __ negl(exponent.reg());
- __ bind(&no_neg);
-
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
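-  // Binary exponentiation: shift the exponent out bit by bit (least
-  // significant first). Whenever the shifted-out bit is set, multiply the
-  // accumulator in xmm1 by the current power in xmm0; square xmm0 on every
-  // iteration.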
- __ bind(&while_true);
- __ shrl(exponent.reg(), Immediate(1));
- __ j(not_carry, &no_multiply);
- __ mulsd(xmm1, xmm0);
- __ bind(&no_multiply);
- __ testl(exponent.reg(), exponent.reg());
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
-  // base.reg() holds the original exponent; if it is negative, return
-  // 1/result.
- __ testl(base.reg(), base.reg());
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ movl(answer.reg(), Immediate(0x7FB00000));
- __ movd(xmm0, answer.reg());
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- call_runtime.Branch(equal);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
-  // The exponent (and possibly the base) is a heap number; from here on we
-  // work on doubles in any case.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- call_runtime.Branch(parity_even);
-
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(base.reg(), &base_not_smi);
- __ SmiToInteger32(base.reg(), base.reg());
- __ cvtlsi2sd(xmm0, base.reg());
- __ jmp(&handle_special_cases);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- call_runtime.Branch(not_equal);
- __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
- __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- call_runtime.Branch(greater_equal);
- __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movl(answer.reg(), Immediate(0xBF000000));
- __ movd(xmm2, answer.reg());
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- call_runtime.Branch(not_equal);
-
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorpd(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- JumpTarget done;
- Label failure, success;
- __ bind(&allocate_return);
- // Make a copy of the frame to enable us to handle allocation
- // failure after the JumpTarget jump.
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
- __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
- // Remove the two original values from the frame - we only need those
- // in the case where we branch to runtime.
- frame()->Drop(2);
- exponent.Unuse();
- base.Unuse();
- done.Jump(&answer);
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- // If we experience an allocation failure we branch to runtime.
- __ bind(&failure);
- call_runtime.Bind();
- answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
- done.Bind(&answer);
- frame()->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- ASSERT_EQ(args->length(), 1);
- Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- Result result = frame_->CallStub(&stub, 1);
- frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
-
- // Leave original value on the frame if we need to call runtime.
- frame()->Dup();
- Result result = frame()->Pop();
- result.ToRegister();
- frame()->Spill(result.reg());
- Label runtime;
- Label non_smi;
- Label load_done;
- JumpTarget end;
-
- __ JumpIfNotSmi(result.reg(), &non_smi);
- __ SmiToInteger32(result.reg(), result.reg());
- __ cvtlsi2sd(xmm0, result.reg());
- __ jmp(&load_done);
- __ bind(&non_smi);
- __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &runtime);
- __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
- __ bind(&load_done);
- __ sqrtsd(xmm0, xmm0);
- // A copy of the virtual frame to allow us to go to runtime after the
- // JumpTarget jump.
- Result scratch = allocator()->Allocate();
- VirtualFrame* clone = new VirtualFrame(frame());
- __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
- __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
- frame()->Drop(1);
- scratch.Unuse();
- end.Jump(&result);
- // We only branch to runtime if we have an allocation error.
- // Use the copy of the original frame as our current frame.
- RegisterFile empty_regs;
- SetFrame(clone, &empty_regs);
- __ bind(&runtime);
- result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
- end.Bind(&result);
- frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- ASSERT_EQ(2, args->length());
- Load(args->at(0));
- Load(args->at(1));
- Result right_res = frame_->Pop();
- Result left_res = frame_->Pop();
- right_res.ToRegister();
- left_res.ToRegister();
- Result tmp_res = allocator()->Allocate();
- ASSERT(tmp_res.is_valid());
- Register right = right_res.reg();
- Register left = left_res.reg();
- Register tmp = tmp_res.reg();
- right_res.Unuse();
- left_res.Unuse();
- tmp_res.Unuse();
- __ cmpq(left, right);
- destination()->true_target()->Branch(equal);
- // Fail if either is a non-HeapObject.
- Condition either_smi =
- masm()->CheckEitherSmi(left, right, tmp);
- destination()->false_target()->Branch(either_smi);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- destination()->false_target()->Branch(not_equal);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- destination()->false_target()->Branch(not_equal);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result value = frame_->Pop();
- value.ToRegister();
- ASSERT(value.is_valid());
- __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- value.Unuse();
- destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- Load(args->at(0));
- Result string = frame_->Pop();
- string.ToRegister();
-
- Result number = allocator()->Allocate();
- ASSERT(number.is_valid());
- __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
- __ IndexFromHash(number.reg(), number.reg());
- string.Unuse();
- frame_->Push(&number);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- frame_->Push(Factory::undefined_value());
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- if (CheckForInlineRuntimeCall(node)) {
- return;
- }
-
- ZoneList<Expression*>* args = node->arguments();
- Comment cmnt(masm_, "[ CallRuntime");
- Runtime::Function* function = node->function();
-
- if (function == NULL) {
- // Push the builtins object found in the current global object.
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(), GlobalObjectOperand());
- __ movq(temp.reg(),
- FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
- frame_->Push(&temp);
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
-
- if (function == NULL) {
- // Call the JS runtime function.
- frame_->Push(node->name());
- Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
- arg_count,
- loop_nesting_);
- frame_->RestoreContextRegister();
- frame_->Push(&answer);
- } else {
- // Call the C runtime function.
- Result answer = frame_->CallRuntime(function, arg_count);
- frame_->Push(&answer);
- }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- Comment cmnt(masm_, "[ UnaryOperation");
-
- Token::Value op = node->op();
-
- if (op == Token::NOT) {
- // Swap the true and false targets but keep the same actual label
- // as the fall through.
- destination()->Invert();
- LoadCondition(node->expression(), destination(), true);
- // Swap the labels back.
- destination()->Invert();
-
- } else if (op == Token::DELETE) {
- Property* property = node->expression()->AsProperty();
- if (property != NULL) {
- Load(property->obj());
- Load(property->key());
- frame_->Push(Smi::FromInt(strict_mode_flag()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
- frame_->Push(&answer);
- return;
- }
-
- Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
- if (variable != NULL) {
-      // Deleting an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
- ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
- Slot* slot = variable->AsSlot();
- if (variable->is_global()) {
- LoadGlobal();
- frame_->Push(variable->name());
- frame_->Push(Smi::FromInt(kNonStrictMode));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 3);
- frame_->Push(&answer);
-
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to delete from the context holding the named
- // variable. Sync the virtual frame eagerly so we can push the
- // arguments directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(rsi);
- frame_->EmitPush(variable->name());
- Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
- frame_->Push(&answer);
- } else {
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
- }
- } else {
- // Default: Result of deleting expressions is true.
- Load(node->expression()); // may have side-effects
- frame_->SetElementAt(0, Factory::true_value());
- }
-
- } else if (op == Token::TYPEOF) {
- // Special case for loading the typeof expression; see comment on
- // LoadTypeofExpression().
- LoadTypeofExpression(node->expression());
- Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
- frame_->Push(&answer);
-
- } else if (op == Token::VOID) {
- Expression* expression = node->expression();
- if (expression && expression->AsLiteral() && (
- expression->AsLiteral()->IsTrue() ||
- expression->AsLiteral()->IsFalse() ||
- expression->AsLiteral()->handle()->IsNumber() ||
- expression->AsLiteral()->handle()->IsString() ||
- expression->AsLiteral()->handle()->IsJSRegExp() ||
- expression->AsLiteral()->IsNull())) {
- // Omit evaluating the value of the primitive literal.
- // It will be discarded anyway, and can have no side effect.
- frame_->Push(Factory::undefined_value());
- } else {
- Load(node->expression());
- frame_->SetElementAt(0, Factory::undefined_value());
- }
-
- } else {
- bool can_overwrite = node->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- bool no_negative_zero = node->expression()->no_negative_zero();
- Load(node->expression());
- switch (op) {
- case Token::NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- UNREACHABLE(); // handled above
- break;
-
- case Token::SUB: {
- GenericUnaryOpStub stub(
- Token::SUB,
- overwrite,
- NO_UNARY_FLAGS,
- no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
- Result operand = frame_->Pop();
- Result answer = frame_->CallStub(&stub, &operand);
- answer.set_type_info(TypeInfo::Number());
- frame_->Push(&answer);
- break;
- }
-
- case Token::BIT_NOT: {
- // Smi check.
- JumpTarget smi_label;
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- operand.ToRegister();
-
- Condition is_smi = masm_->CheckSmi(operand.reg());
- smi_label.Branch(is_smi, &operand);
-
- GenericUnaryOpStub stub(Token::BIT_NOT,
- overwrite,
- NO_UNARY_SMI_CODE_IN_STUB);
- Result answer = frame_->CallStub(&stub, &operand);
- continue_label.Jump(&answer);
-
- smi_label.Bind(&answer);
- answer.ToRegister();
- frame_->Spill(answer.reg());
- __ SmiNot(answer.reg(), answer.reg());
- continue_label.Bind(&answer);
- answer.set_type_info(TypeInfo::Smi());
- frame_->Push(&answer);
- break;
- }
-
- case Token::ADD: {
- // Smi check.
- JumpTarget continue_label;
- Result operand = frame_->Pop();
- TypeInfo operand_info = operand.type_info();
- operand.ToRegister();
- Condition is_smi = masm_->CheckSmi(operand.reg());
- continue_label.Branch(is_smi, &operand);
- frame_->Push(&operand);
- Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
- CALL_FUNCTION, 1);
-
- continue_label.Bind(&answer);
- if (operand_info.IsSmi()) {
- answer.set_type_info(TypeInfo::Smi());
- } else if (operand_info.IsInteger32()) {
- answer.set_type_info(TypeInfo::Integer32());
- } else {
- answer.set_type_info(TypeInfo::Number());
- }
- frame_->Push(&answer);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub. The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
- DeferredPrefixCountOperation(Register dst,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged. Call into the runtime
-// to convert the argument to a number. Update the original value in
-// old. Call the specialized add or subtract stub. The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
- DeferredPostfixCountOperation(Register dst,
- Register old,
- bool is_increment,
- TypeInfo input_type)
- : dst_(dst),
- old_(old),
- is_increment_(is_increment),
- input_type_(input_type) {
- set_comment("[ DeferredCountOperation");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Register old_;
- bool is_increment_;
- TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
- Register left;
- if (input_type_.IsNumber()) {
- __ push(dst_); // Save the input to use as the old value.
- left = dst_;
- } else {
- __ push(dst_);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- __ push(rax); // Save the result of ToNumber to use as the old value.
- left = rax;
- }
-
- GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Number());
- stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- Comment cmnt(masm_, "[ CountOperation");
-
- bool is_postfix = node->is_postfix();
- bool is_increment = node->op() == Token::INC;
-
- Variable* var = node->expression()->AsVariableProxy()->AsVariable();
- bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- // Postfix operations need a stack slot under the reference to hold
- // the old value while the new value is being stored. This is so that
- // in the case that storing the new value requires a call, the old
- // value will be in the frame to be spilled.
- if (is_postfix) frame_->Push(Smi::FromInt(0));
-
- // A constant reference is not saved to, so the reference is not a
- // compound assignment reference.
- { Reference target(this, node->expression(), !is_const);
- if (target.is_illegal()) {
- // Spoof the virtual frame to have the expected height (one higher
- // than on entry).
- if (!is_postfix) frame_->Push(Smi::FromInt(0));
- return;
- }
- target.TakeValue();
-
- Result new_value = frame_->Pop();
- new_value.ToRegister();
-
- Result old_value; // Only allocated in the postfix case.
- if (is_postfix) {
- // Allocate a temporary to preserve the old value.
- old_value = allocator_->Allocate();
- ASSERT(old_value.is_valid());
- __ movq(old_value.reg(), new_value.reg());
-
- // The return value for postfix operations is ToNumber(input).
- // Keep more precise type info if the input is some kind of
- // number already. If the input is not a number we have to wait
- // for the deferred code to convert it.
- if (new_value.type_info().IsNumber()) {
- old_value.set_type_info(new_value.type_info());
- }
- }
- // Ensure the new value is writable.
- frame_->Spill(new_value.reg());
-
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment,
- new_value.type_info());
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment,
- new_value.type_info());
- }
-
- if (new_value.is_smi()) {
- if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
- } else {
- __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
- }
- if (is_increment) {
- __ SmiAddConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- } else {
- __ SmiSubConstant(new_value.reg(),
- new_value.reg(),
- Smi::FromInt(1),
- deferred->entry_label());
- }
- deferred->BindExit();
-
- // Postfix count operations return their input converted to
- // number. The case when the input is already a number is covered
- // above in the allocation code for old_value.
- if (is_postfix && !new_value.type_info().IsNumber()) {
- old_value.set_type_info(TypeInfo::Number());
- }
-
- new_value.set_type_info(TypeInfo::Number());
-
- // Postfix: store the old value in the allocated slot under the
- // reference.
- if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
- frame_->Push(&new_value);
- // Non-constant: update the reference.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
- }
-
- // Postfix: drop the new value and use the old.
- if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- // According to ECMA-262 section 11.11, page 58, the binary logical
- // operators must yield the result of one of the two expressions
- // before any ToBoolean() conversions. This means that the value
- // produced by a && or || operator is not necessarily a boolean.
-
- // NOTE: If the left hand side produces a materialized value (not
- // control flow), we force the right hand side to do the same. This
- // is necessary because we assume that if we get control flow on the
- // last path out of an expression we got it on all paths.
- if (node->op() == Token::AND) {
- JumpTarget is_true;
- ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.false_was_fall_through()) {
- // The current false target was used as the fall-through. If
- // there are no dangling jumps to is_true then the left
- // subexpression was unconditionally false. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_true.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current false target was a forward jump then we have a
- // valid frame, we have just bound the false target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->false_target()->Unuse();
- destination()->false_target()->Jump();
- }
- is_true.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have actually just jumped to or bound the current false
- // target but the current control destination is not marked as
- // used.
- destination()->Use(false);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_true
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_true
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'false' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&pop_and_continue, &exit, true);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_true.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
-
- } else {
- ASSERT(node->op() == Token::OR);
- JumpTarget is_false;
- ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), &dest, false);
-
- if (dest.true_was_fall_through()) {
- // The current true target was used as the fall-through. If
- // there are no dangling jumps to is_false then the left
- // subexpression was unconditionally true. Otherwise we have
- // paths where we do have to evaluate the right subexpression.
- if (is_false.is_linked()) {
- // We need to compile the right subexpression. If the jump to
- // the current true target was a forward jump then we have a
- // valid frame, we have just bound the true target, and we
- // have to jump around the code for the right subexpression.
- if (has_valid_frame()) {
- destination()->true_target()->Unuse();
- destination()->true_target()->Jump();
- }
- is_false.Bind();
- // The left subexpression compiled to control flow, so the
- // right one is free to do so as well.
- LoadCondition(node->right(), destination(), false);
- } else {
- // We have just jumped to or bound the current true target but
- // the current control destination is not marked as used.
- destination()->Use(true);
- }
-
- } else if (dest.is_used()) {
- // The left subexpression compiled to control flow (and is_false
- // was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), destination(), false);
-
- } else {
- // We have a materialized value on the frame, so we exit with
- // one on all paths. There are possibly also jumps to is_false
- // from nested subexpressions.
- JumpTarget pop_and_continue;
- JumpTarget exit;
-
- // Avoid popping the result if it converts to 'true' using the
- // standard ToBoolean() conversion as described in ECMA-262,
- // section 9.2, page 30.
- //
- // Duplicate the TOS value. The duplicate will be popped by
- // ToBoolean.
- frame_->Dup();
- ControlDestination dest(&exit, &pop_and_continue, false);
- ToBoolean(&dest);
-
- // Pop the result of evaluating the first part.
- frame_->Drop();
-
- // Compile right side expression.
- is_false.Bind();
- Load(node->right());
-
- // Exit (always with a materialized value).
- exit.Bind();
- }
- }
-}
-
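The comments at the top of GenerateLogicalBooleanOperation above restate the ECMA-262 §11.11 rule: a && / || expression yields one of its operands unchanged, not a ToBoolean()-converted value. A minimal C++ sketch of that selection rule follows; the Value/ToBoolean helpers are hypothetical stand-ins rather than V8 types, and the short-circuit evaluation of the right operand (which the generated code does perform) is omitted here for brevity.

// Hypothetical model: '&&' keeps the left operand if it converts to false,
// otherwise yields the right operand; '||' is the mirror image. Neither
// operator forces its result to a boolean.
struct Value { bool truthy; double payload; };

static bool ToBoolean(const Value& v) { return v.truthy; }

static Value LogicalAnd(const Value& left, const Value& right) {
  return ToBoolean(left) ? right : left;
}

static Value LogicalOr(const Value& left, const Value& right) {
  return ToBoolean(left) ? left : right;
}
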
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
-
- if (node->op() == Token::AND || node->op() == Token::OR) {
- GenerateLogicalBooleanOperation(node);
- } else {
- // NOTE: The code below assumes that the slow cases (calls to runtime)
- // never return a constant/immutable object.
- OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->ResultOverwriteAllowed()) {
- overwrite_mode = OVERWRITE_RIGHT;
- }
-
- if (node->left()->IsTrivial()) {
- Load(node->right());
- Result right = frame_->Pop();
- frame_->Push(node->left());
- frame_->Push(&right);
- } else {
- Load(node->left());
- Load(node->right());
- }
- GenericBinaryOperation(node, overwrite_mode);
- }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- Comment cmnt(masm_, "[ CompareOperation");
-
- // Get the expressions from the node.
- Expression* left = node->left();
- Expression* right = node->right();
- Token::Value op = node->op();
- // To make typeof testing for natives implemented in JavaScript really
- // efficient, we generate special code for expressions of the form:
- // 'typeof <expression> == <string>'.
- UnaryOperation* operation = left->AsUnaryOperation();
- if ((op == Token::EQ || op == Token::EQ_STRICT) &&
- (operation != NULL && operation->op() == Token::TYPEOF) &&
- (right->AsLiteral() != NULL &&
- right->AsLiteral()->handle()->IsString())) {
- Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
- // Load the operand and move it to a register.
- LoadTypeofExpression(operation->expression());
- Result answer = frame_->Pop();
- answer.ToRegister();
-
- if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->true_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable string object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
- answer.Unuse();
- destination()->Split(below); // Unsigned byte comparison needed.
-
- } else if (check->Equals(Heap::boolean_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
- destination()->true_target()->Branch(equal);
- __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::undefined_symbol())) {
- __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- answer.Unuse();
- destination()->Split(not_zero);
-
- } else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- frame_->Spill(answer.reg());
- __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
- destination()->true_target()->Branch(equal);
- // Regular expressions are callable so typeof == 'function'.
- __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
- answer.Unuse();
- destination()->Split(equal);
-
- } else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(answer.reg());
- destination()->false_target()->Branch(is_smi);
- __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
- destination()->true_target()->Branch(equal);
-
- // Regular expressions are typeof == 'function', not 'object'.
- __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
- destination()->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- destination()->false_target()->Branch(not_zero);
- __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
- destination()->false_target()->Branch(below);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- answer.Unuse();
- destination()->Split(below_equal);
- } else {
- // Uncommon case: typeof testing against a string literal that is
- // never returned from the typeof operator.
- answer.Unuse();
- destination()->Goto(false);
- }
- return;
- }
-
- Condition cc = no_condition;
- bool strict = false;
- switch (op) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN: {
- Load(left);
- Load(right);
- Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
- frame_->Push(&answer); // Push the result.
- return;
- }
- case Token::INSTANCEOF: {
- Load(left);
- Load(right);
- InstanceofStub stub(InstanceofStub::kNoFlags);
- Result answer = frame_->CallStub(&stub, 2);
- answer.ToRegister();
- __ testq(answer.reg(), answer.reg());
- answer.Unuse();
- destination()->Split(zero);
- return;
- }
- default:
- UNREACHABLE();
- }
-
- if (left->IsTrivial()) {
- Load(right);
- Result right_result = frame_->Pop();
- frame_->Push(left);
- frame_->Push(&right_result);
- } else {
- Load(left);
- Load(right);
- }
-
- Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- Comment cmnt(masm_, "[ CompareToNull");
-
- Load(node->expression());
- Result operand = frame_->Pop();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (node->is_strict()) {
- operand.Unuse();
- destination()->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- destination()->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- destination()->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- destination()->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- destination()->Split(not_zero);
- }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
- && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
- && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
- && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
- && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
- && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
- && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
- && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
- && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
-}
-#endif
-
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst. The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- DeferredReferenceGetNamedValue(Register dst,
- Register receiver,
- Handle<String> name)
- : dst_(dst), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Handle<String> name_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- if (!receiver_.is(rax)) {
- __ movq(rax, receiver_);
- }
- __ Move(rcx, name_);
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The call must be followed by a test rax instruction to indicate
- // that the inobject property case was inlined.
- //
- // Store the delta to the map check instruction here in the test
- // instruction. Use masm_-> instead of the __ macro since the
- // latter can't return a value.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::named_load_inline_miss, 1);
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetKeyedValue(Register dst,
- Register receiver,
- Register key)
- : dst_(dst), receiver_(receiver), key_(key) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Label patch_site_;
- Register dst_;
- Register receiver_;
- Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- if (receiver_.is(rdx)) {
- if (!key_.is(rax)) {
- __ movq(rax, key_);
- } // else do nothing.
- } else if (receiver_.is(rax)) {
- if (key_.is(rdx)) {
- __ xchg(rax, rdx);
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rax, key_);
- }
- } else if (key_.is(rax)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rax, key_);
- __ movq(rdx, receiver_);
- }
- // Calculate the delta from the IC call instruction to the map check
- // movq instruction in the inlined version. This delta is stored in
- // a test(rax, delta) instruction after the call so that we can find
- // it in the IC initialization code and patch the movq instruction.
- // This means that we cannot allow test instructions after calls to
- // KeyedLoadIC stubs in other places.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instruction to the
- // test instruction. We use masm_-> directly here instead of the __
- // macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- // TODO(X64): Consider whether it's worth switching the test to a
- // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
- // be generated normally.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
-
- if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
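The long comment in DeferredReferenceGetKeyedValue::Generate above describes the patching protocol shared with the named-load case: the IC call is followed by testl(rax, Immediate(-delta_to_patch_site)), and the IC miss handler reads that immediate back to locate the movq it must patch. Below is a rough sketch of the decoding side, assuming the one-byte 0xA9 encoding of 'test eax, imm32'; the function is illustrative, not the V8 API.

#include <stdint.h>
#include <string.h>

// Illustrative decoder: 'test_address' points at the byte right after the IC
// call returns. If that byte is a 'test eax, imm32', its imm32 holds
// -delta_to_patch_site, so adding the (negative) stored value walks back to
// the patchable map-check instruction.
static uint8_t* FindInlinedMapCheck(uint8_t* test_address) {
  if (*test_address != 0xA9) return NULL;  // No inlined fast case was emitted.
  int32_t stored_delta;                    // Equals -delta_to_patch_site.
  memcpy(&stored_delta, test_address + 1, sizeof(stored_delta));
  return test_address + stored_delta;
}
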
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver,
- StrictModeFlag strict_mode)
- : value_(value),
- key_(key),
- receiver_(receiver),
- strict_mode_(strict_mode) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- Label* patch_site() { return &patch_site_; }
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
- Label patch_site_;
- StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
- // Move value, receiver, and key to registers rax, rdx, and rcx, as
- // the IC stub expects.
- // Move value to rax, using xchg if the receiver or key is in rax.
- if (!value_.is(rax)) {
- if (!receiver_.is(rax) && !key_.is(rax)) {
- __ movq(rax, value_);
- } else {
- __ xchg(rax, value_);
- // Update receiver_ and key_ if they are affected by the swap.
- if (receiver_.is(rax)) {
- receiver_ = value_;
- } else if (receiver_.is(value_)) {
- receiver_ = rax;
- }
- if (key_.is(rax)) {
- key_ = value_;
- } else if (key_.is(value_)) {
- key_ = rax;
- }
- }
- }
- // Value is now in rax. Its original location is remembered in value_,
- // and the value is restored to value_ before returning.
- // The variables receiver_ and key_ are not preserved.
- // Move receiver and key to rdx and rcx, swapping if necessary.
- if (receiver_.is(rdx)) {
- if (!key_.is(rcx)) {
- __ movq(rcx, key_);
- } // Else everything is already in the right place.
- } else if (receiver_.is(rcx)) {
- if (key_.is(rdx)) {
- __ xchg(rcx, rdx);
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rdx, receiver_);
- __ movq(rcx, key_);
- }
- } else if (key_.is(rcx)) {
- __ movq(rdx, receiver_);
- } else {
- __ movq(rcx, key_);
- __ movq(rdx, receiver_);
- }
-
- // Call the IC stub.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // The delta from the start of the map-compare instructions (initial movq)
- // to the test instruction. We use masm_-> directly here instead of the
- // __ macro because the macro sometimes uses macro expansion to turn
- // into something that can't return a value. This is encountered
- // when doing generated code coverage tests.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
- // Here we use masm_-> instead of the __ macro because this is the
- // instruction that gets patched and coverage code gets in the way.
- masm_->testl(rax, Immediate(-delta_to_patch_site));
- // Restore value (returned from store IC).
- if (!value_.is(rax)) __ movq(value_, rax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Do not inline the inobject property case for loads from the global
- // object. Also do not inline for unoptimized code. This saves time
- // in the code generator. Unoptimized code is toplevel code or code
- // that is not in a loop.
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- Comment cmnt(masm(), "[ Load from named Property");
- frame()->Push(name);
-
- RelocInfo::Mode mode = is_contextual
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- result = frame()->CallLoadIC(mode);
- // A test rax instruction following the call signals that the
- // inobject property case was inlined. Ensure that there is not
- // a test rax instruction here.
- __ nop();
- } else {
- // Inline the inobject property case.
- Comment cmnt(masm(), "[ Inlined named property load");
- Result receiver = frame()->Pop();
- receiver.ToRegister();
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
- // Cannot use r12 for receiver, because that changes
- // the distance between a call and a fixup location,
- // due to a special encoding of r12 as r/m in a ModR/M byte.
- if (receiver.reg().is(r12)) {
- frame()->Spill(receiver.reg()); // It will be overwritten with result.
- // Swap receiver and value.
- __ movq(result.reg(), receiver.reg());
- Result temp = receiver;
- receiver = result;
- result = temp;
- }
-
- DeferredReferenceGetNamedValue* deferred =
- new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
-
- // Check that the receiver is a heap object.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- __ bind(deferred->patch_site());
- // This is the map check instruction that will be patched (so we can't
- // use the double underscore macro that may insert instructions).
- // Initially use an invalid map to force a failure.
- masm()->Move(kScratchRegister, Factory::null_value());
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed
- // size which allows the assert below to succeed and patching to work.
- // Don't use deferred->Branch(...), since that might add coverage code.
- masm()->j(not_equal, deferred->entry_label());
-
- // The delta from the patch label to the load offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
- LoadIC::kOffsetToLoadInstruction);
- // The initial (invalid) offset has to be large enough to force
- // a 32-bit instruction encoding to allow patching with an
- // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
-
- __ IncrementCounter(&Counters::named_load_inline, 1);
- deferred->BindExit();
- }
- ASSERT(frame()->height() == original_height - 1);
- return result;
-}
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
- int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
- Result result;
- if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // A test rax instruction following the call signals that the inobject
- // property case was inlined. Ensure that there is not a test rax
- // instruction here.
- __ nop();
- } else {
- // Inline the in-object property case.
- JumpTarget slow, done;
- Label patch_site;
-
- // Get the value and receiver from the stack.
- Result value = frame()->Pop();
- value.ToRegister();
- Result receiver = frame()->Pop();
- receiver.ToRegister();
-
- // Allocate result register.
- result = allocator()->Allocate();
- ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
- // Cannot use r12 for receiver, because that changes
- // the distance between a call and a fixup location,
- // due to a special encoding of r12 as r/m in a ModR/M byte.
- if (receiver.reg().is(r12)) {
- frame()->Spill(receiver.reg()); // It will be overwritten with result.
- // Swap receiver and value.
- __ movq(result.reg(), receiver.reg());
- Result temp = receiver;
- receiver = result;
- result = temp;
- }
-
- // Check that the receiver is a heap object.
- Condition is_smi = masm()->CheckSmi(receiver.reg());
- slow.Branch(is_smi, &value, &receiver);
-
- // This is the map check instruction that will be patched.
- // Initially use an invalid map to force a failure. The exact
- // instruction sequence is important because we use the
- // kOffsetToStoreInstruction constant for patching. We avoid using
- // the __ macro for the following two instructions because it
- // might introduce extra instructions.
- __ bind(&patch_site);
- masm()->Move(kScratchRegister, Factory::null_value());
- masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- // This branch is always a forwards branch so it's always a fixed size
- // which allows the assert below to succeed and patching to work.
- slow.Branch(not_equal, &value, &receiver);
-
- // The delta from the patch label to the store offset must be
- // statically known.
- ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
- StoreIC::kOffsetToStoreInstruction);
-
- // The initial (invalid) offset has to be large enough to force a 32-bit
- // instruction encoding to allow patching with an arbitrary offset. Use
- // kMaxInt (minus kHeapObjectTag).
- int offset = kMaxInt;
- __ movq(FieldOperand(receiver.reg(), offset), value.reg());
- __ movq(result.reg(), value.reg());
-
- // Allocate scratch register for write barrier.
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
-
- // The write barrier clobbers all input registers, so spill the
- // receiver and the value.
- frame_->Spill(receiver.reg());
- frame_->Spill(value.reg());
-
- // If the receiver and the value share a register allocate a new
- // register for the receiver.
- if (receiver.reg().is(value.reg())) {
- receiver = allocator()->Allocate();
- ASSERT(receiver.is_valid());
- __ movq(receiver.reg(), value.reg());
- }
-
- // Update the write barrier. To save instructions in the inlined
- // version we do not filter smis.
- Label skip_write_barrier;
- __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
- int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ lea(scratch.reg(), Operand(receiver.reg(), offset));
- __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
- if (FLAG_debug_code) {
- __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- }
- __ bind(&skip_write_barrier);
- value.Unuse();
- scratch.Unuse();
- receiver.Unuse();
- done.Jump(&result);
-
- slow.Bind(&value, &receiver);
- frame()->Push(&receiver);
- frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
- // Encode the offset to the map check instruction and the offset
- // to the write barrier store address computation in a test rax
- // instruction.
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
- __ testl(rax,
- Immediate((delta_to_record_write << 16) | delta_to_patch_site));
- done.Bind(&result);
- }
-
- ASSERT_EQ(expected_height, frame()->height());
- return result;
-}
-
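The slow path of EmitNamedStore above packs two code offsets into a single test rax immediate: (delta_to_record_write << 16) | delta_to_patch_site. Recovering them is just a matter of splitting the 32-bit immediate; a hedged sketch, assuming (as the 16-bit shift implies) that both deltas fit in 16 bits:

#include <stdint.h>

// Illustrative only: undo the packing done by
// testl(rax, Immediate((delta_to_record_write << 16) | delta_to_patch_site)).
static void SplitStoreDeltas(uint32_t imm,
                             int* delta_to_patch_site,
                             int* delta_to_record_write) {
  *delta_to_patch_site = static_cast<int>(imm & 0xFFFFu);  // Low 16 bits.
  *delta_to_record_write = static_cast<int>(imm >> 16);    // High 16 bits.
}
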
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (loop_nesting() > 0) {
- Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- // Allocate the temporary early so that we use rax if it is free.
- Result elements = allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- Result key = frame_->Pop();
- Result receiver = frame_->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // If key and receiver are shared registers on the frame, their values will
- // be automatically saved and restored when going to deferred code.
- // The result is returned in elements, which is not shared.
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(elements.reg(),
- receiver.reg(),
- key.reg());
-
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the receiver has the expected map.
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use a load
- // from the root array to load null_value, since the load must be patched
- // with the expected receiver map, which is not in the root array.
- masm_->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ AssertFastElements(elements.reg());
-
- // Check that key is within bounds.
- __ SmiCompare(key.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, then we can reuse that one because the value
- // coming from the deferred code will be in rax.
- SmiIndex index =
- masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(elements.reg(),
- FieldOperand(elements.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- result = elements;
- __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- } else {
- Comment cmnt(masm_, "[ Load from keyed Property");
- result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 2);
- return result;
-}
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
- int original_height = frame()->height();
-#endif
- Result result;
- // Generate inlined version of the keyed store if the code is in a loop
- // and the key is likely to be a smi.
- if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
- Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- result = frame()->Pop();
- Result key = frame()->Pop();
- Result receiver = frame()->Pop();
-
- Result tmp = allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it in a
- // register.
- bool value_is_constant = result.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- result.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(result.reg(),
- key.reg(),
- receiver.reg(),
- strict_mode_flag());
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ JumpIfNotSmi(key.reg(), deferred->entry_label());
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis. Use unsigned comparison to handle negative keys.
- __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
- key.reg());
- deferred->Branch(below_equal);
-
- // Get the elements array from the receiver and check that it is not a
- // dictionary.
- __ movq(tmp.reg(),
- FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
- // Check whether it is possible to omit the write barrier. If the elements
- // array is in new space or the value written is a smi we can safely update
- // the elements array without write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ JumpIfNotSmi(result.reg(), deferred->entry_label());
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the fixed
- // array map comparison. When debugging, we patch this comparison to
- // always fail so that we will hit the IC call in the deferred code
- // which will allow the debugger to break for fast case stores.
- __ bind(deferred->patch_site());
- // Avoid using __ to ensure the distance from patch_site
- // to the map address is always the same.
- masm()->movq(kScratchRegister, Factory::fixed_array_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Store the value.
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(FieldOperand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize),
- result.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
- deferred->BindExit();
- } else {
- result = frame()->CallKeyedStoreIC(strict_mode_flag());
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- }
- ASSERT(frame()->height() == original_height - 3);
- return result;
-}
-
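EmitKeyedStore above skips the write barrier when the elements array is in new space or the stored value is a smi. That decision reduces to a small predicate; a minimal sketch with hypothetical inputs, not a V8 helper:

// Illustrative: a store into the elements array needs a write barrier only
// when the array lives outside new space and the stored value is a heap
// pointer rather than a smi.
static bool StoreNeedsWriteBarrier(bool elements_in_new_space,
                                   bool value_is_smi) {
  return !elements_in_new_space && !value_is_smi;
}
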
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
-
- // Record the source position for the property load.
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Load from Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- break;
- }
-
- case NAMED: {
- Variable* var = expression_->AsVariableProxy()->AsVariable();
- bool is_global = var != NULL;
- ASSERT(!is_global || var->is_global());
- if (persist_after_get_) {
- cgen_->frame()->Dup();
- }
- Result result = cgen_->EmitNamedLoad(GetName(), is_global);
- cgen_->frame()->Push(&result);
- break;
- }
-
- case KEYED: {
- // A load of a bare identifier (load from global) cannot be keyed.
- ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
- if (persist_after_get_) {
- cgen_->frame()->PushElementAt(1);
- cgen_->frame()->PushElementAt(1);
- }
- Result value = cgen_->EmitKeyedLoad();
- cgen_->frame()->Push(&value);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- if (!persist_after_get_) {
- set_unloaded();
- }
-}
-
-
-void Reference::TakeValue() {
- // TODO(X64): This function is completely architecture independent. Move
- // it somewhere shared.
-
- // For non-constant frame-allocated slots, we invalidate the value in the
- // slot. For all others, we fall back on GetValue.
- ASSERT(!cgen_->in_spilled_code());
- ASSERT(!is_illegal());
- if (type_ != SLOT) {
- GetValue();
- return;
- }
-
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP ||
- slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST ||
- slot->is_arguments()) {
- GetValue();
- return;
- }
-
- // Only non-constant, frame-allocated parameters and locals can reach
- // here. Be careful not to use the optimizations for arguments
- // object access since it may not have been initialized yet.
- ASSERT(!slot->is_arguments());
- if (slot->type() == Slot::PARAMETER) {
- cgen_->frame()->TakeParameterAt(slot->index());
- } else {
- ASSERT(slot->type() == Slot::LOCAL);
- cgen_->frame()->TakeLocalAt(slot->index());
- }
-
- ASSERT(persist_after_get_);
- // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- MacroAssembler* masm = cgen_->masm();
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
- ASSERT(slot != NULL);
- cgen_->StoreToSlot(slot, init_state);
- set_unloaded();
- break;
- }
-
- case NAMED: {
- Comment cmnt(masm, "[ Store to named Property");
- Result answer = cgen_->EmitNamedStore(GetName(), false);
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case KEYED: {
- Comment cmnt(masm, "[ Store to keyed Property");
- Property* property = expression()->AsProperty();
- ASSERT(property != NULL);
-
- Result answer = cgen_->EmitKeyedStore(property->key()->type());
- cgen_->frame()->Push(&answer);
- set_unloaded();
- break;
- }
-
- case UNLOADED:
- case ILLEGAL:
- UNREACHABLE();
- }
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right) {
- if (stub->ArgsInRegistersSupported()) {
- stub->SetArgsInRegisters();
- return frame_->CallStub(stub, left, right);
- } else {
- frame_->Push(left);
- frame_->Push(right);
- return frame_->CallStub(stub, 2);
- }
-}
-
-#undef __
-
#define __ masm.
#ifdef _WIN64
@@ -8747,7 +58,7 @@ ModuloFunction CreateModuloFunction() {
&actual_size,
true));
CHECK(buffer);
- Assembler masm(buffer, static_cast<int>(actual_size));
+ Assembler masm(NULL, buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
@@ -8821,7 +132,7 @@ ModuloFunction CreateModuloFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- // Call the function from C++.
+ // Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
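CreateModuloFunction hands its raw executable buffer back to C++ through FUNCTION_CAST. A sketch of the call site, assuming the double(double, double) signature declared for ModuloFunction; for ordinary finite inputs the result should agree with fmod:

// Illustrative call site: once cast, the generated code is just an ordinary
// function pointer on the C++ side.
ModuloFunction do_mod = CreateModuloFunction();
double remainder = do_mod(13.5, 4.0);  // Same value as fmod(13.5, 4.0).
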
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 4392829195..94c7850289 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -30,270 +30,17 @@
#include "ast.h"
#include "ic-inl.h"
-#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame. The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Like GetValue except that the slot is expected to be written to before
- // being read from again. The value of the reference may be invalidated,
- // causing subsequent attempts to read it to fail.
- void TakeValue();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- bool persist_after_get_;
-};
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through. The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally. Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
- ControlDestination(JumpTarget* true_target,
- JumpTarget* false_target,
- bool true_is_fall_through)
- : true_target_(true_target),
- false_target_(false_target),
- true_is_fall_through_(true_is_fall_through),
- is_used_(false) {
- ASSERT(true_is_fall_through ? !true_target->is_bound()
- : !false_target->is_bound());
- }
-
- // Accessors for the jump targets. Directly jumping or branching to
- // or binding the targets will not update the destination's state.
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- // True if the destination has been jumped to unconditionally or
- // control has been split to both targets. This predicate does not
- // test whether the targets have been extracted and manipulated as
- // raw jump targets.
- bool is_used() const { return is_used_; }
-
- // True if the destination is used and the true target (respectively
- // false target) was the fall through. If the target is backward,
- // "fall through" included jumping unconditionally to it.
- bool true_was_fall_through() const {
- return is_used_ && true_is_fall_through_;
- }
-
- bool false_was_fall_through() const {
- return is_used_ && !true_is_fall_through_;
- }
-
- // Emit a branch to one of the true or false targets, and bind the
- // other target. Because this binds the fall-through target, it
- // should be emitted in tail position (as the last thing when
- // compiling an expression).
- void Split(Condition cc) {
- ASSERT(!is_used_);
- if (true_is_fall_through_) {
- false_target_->Branch(NegateCondition(cc));
- true_target_->Bind();
- } else {
- true_target_->Branch(cc);
- false_target_->Bind();
- }
- is_used_ = true;
- }
-
- // Emit an unconditional jump in tail position, to the true target
- // (if the argument is true) or the false target. The "jump" will
- // actually bind the jump target if it is forward, jump to it if it
- // is backward.
- void Goto(bool where) {
- ASSERT(!is_used_);
- JumpTarget* target = where ? true_target_ : false_target_;
- if (target->is_bound()) {
- target->Jump();
- } else {
- target->Bind();
- }
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Mark this jump target as used as if Goto had been called, but
- // without generating a jump or binding a label (the control effect
- // should have already happened). This is used when the left
- // subexpression of the short-circuit boolean operators is
- // compiled.
- void Use(bool where) {
- ASSERT(!is_used_);
- ASSERT((where ? true_target_ : false_target_)->is_bound());
- is_used_ = true;
- true_is_fall_through_ = where;
- }
-
- // Swap the true and false targets but keep the same actual label as
- // the fall through. This is used when compiling negated
- // expressions, where we want to swap the targets but preserve the
- // state.
- void Invert() {
- JumpTarget* temp_target = true_target_;
- true_target_ = false_target_;
- false_target_ = temp_target;
-
- true_is_fall_through_ = !true_is_fall_through_;
- }
-
- private:
- // True and false jump targets.
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // Before using the destination: true if the true target is the
- // preferred fall through, false if the false target is. After
- // using the destination: true if the true target was actually used
- // as the fall through, false if the false target was.
- bool true_is_fall_through_;
-
- // True if the Split or Goto functions have been called.
- bool is_used_;
-};
-
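A typical use of ControlDestination, as the removed codegen does after a comparison: build a destination over two jump targets, emit the flag-setting instruction, and Split in tail position. The fragment below only shows the call pattern inside a codegen method; the targets and the compare are placeholders.

// Illustrative fragment, not a complete compiler routine.
JumpTarget if_true;
JumpTarget if_false;
ControlDestination dest(&if_true, &if_false, /* true_is_fall_through */ true);
// ... emit a compare that leaves the answer in the condition flags ...
dest.Split(equal);  // Branches to if_false on not_equal, binds if_true here.
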
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair). It is threaded through
-// the call stack. Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own control destination.
- CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- // Accessors for the state.
- ControlDestination* destination() const { return destination_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A control destination in case the expression has a control-flow
- // effect.
- ControlDestination* destination_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
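The constructor/destructor contract described above is a plain RAII save-and-restore of the owner's current state. The sketch below approximates what the removed definitions amount to; it mirrors the comments rather than reproducing the deleted code verbatim.

// Construction installs this state on the owner and remembers the previous
// one; destruction restores it.
CodeGenState::CodeGenState(CodeGenerator* owner,
                           ControlDestination* destination)
    : owner_(owner),
      destination_(destination),
      previous_(owner->state()) {
  owner->set_state(this);
}

CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}
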
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
@@ -319,428 +66,7 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- bool in_spilled_code() const { return in_spilled_code_; }
- void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
- // Construction/Destruction
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors
- inline bool is_eval();
- inline Scope* scope();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- ControlDestination* destination() const { return state_->destination(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once).
- void GenerateReturnSequence(Result* return_value);
-
- // Generate code for a fast smi loop.
- void GenerateFastSmiLoop(ForStatement* node);
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- Result StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- Operand SlotOperand(Slot* slot, Register tmp);
-
- Operand ContextSlotOperandCheckExtensions(Slot* slot,
- Result tmp,
- JumpTarget* slow);
-
- // Expressions
- void LoadCondition(Expression* x,
- ControlDestination* destination,
- bool force_control);
- void Load(Expression* expr);
- void LoadGlobal();
- void LoadGlobalReceiver();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
- Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- Result* result,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the expression stack into a slot, leaving the
- // value in place.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Receiver is passed on the frame and not consumed.
- Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // If the store is contextual, value is passed on the frame and consumed.
- // Otherwise, receiver and value are passed on the frame and consumed.
- Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a property of an object, returning it in a Result.
- // The object and the property name are passed on the stack, and
- // not changed.
- Result EmitKeyedLoad();
-
- // Receiver, key, and value are passed on the frame and consumed.
- Result EmitKeyedStore(StaticType* key_type);
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Translate the value on top of the frame into control flow to the
- // control destination.
- void ToBoolean(ControlDestination* destination);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(BinaryOperation* expr,
- OverwriteMode overwrite_mode);
-
- // Generate a stub call from the virtual frame.
- Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
- Result* left,
- Result* right);
-
- // Emits code sequence that jumps to a JumpTarget if the inputs
- // are both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfBothSmiUsingTypeInfo(Result* left,
- Result* right,
- JumpTarget* both_smi);
-
- // Emits code sequence that jumps to deferred code if the input
- // is not a smi. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfNotSmiUsingTypeInfo(Register reg,
- TypeInfo type,
- DeferredCode* deferred);
-
- // Emits code sequence that jumps to deferred code if the inputs
- // are not both smis. Cannot be in MacroAssembler because it takes
- // advantage of TypeInfo to skip unneeded checks.
- void JumpIfNotBothSmiUsingTypeInfo(Register left,
- Register right,
- TypeInfo left_info,
- TypeInfo right_info,
- DeferredCode* deferred);
-
- // If possible, combine two constant smi values using op to produce
- // a smi result, and push it on the virtual frame, all at compile time.
- // Returns true if it succeeds. Otherwise it has no effect.
- bool FoldConstantSmis(Token::Value op, int left, int right);
-
- // Emit code to perform a binary operation on a constant
- // smi and a likely smi. Consumes the Result *operand.
- Result ConstantSmiBinaryOperation(BinaryOperation* expr,
- Result* operand,
- Handle<Object> constant_operand,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- // Emit code to perform a binary operation on two likely smis.
- // The code to handle smi arguments is produced inline.
- // Consumes the Results *left and *right.
- Result LikelySmiBinaryOperation(BinaryOperation* expr,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
-
- void Comparison(AstNode* node,
- Condition cc,
- bool strict,
- ControlDestination* destination);
-
- // If at least one of the sides is a constant smi, generate optimized code.
- void ConstantSmiComparison(Condition cc,
- bool strict,
- ControlDestination* destination,
- Result* left_side,
- Result* right_side,
- bool left_side_constant_smi,
- bool right_side_constant_smi,
- bool is_loop_condition);
-
- void GenerateInlineNumberComparison(Result* left_side,
- Result* right_side,
- Condition cc,
- ControlDestination* dest);
-
- // To prevent long attacker-controlled byte sequences, integer constants
- // from the JavaScript source are loaded in two parts if they are larger
- // than 16 bits.
- static const int kMaxSmiInlinedBits = 16;
- bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target using
- // at most 16 bits of user-controlled data per assembly operation.
- void LoadUnsafeSmi(Register target, Handle<Object> value);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- // Fast support for StringAdd.
- void GenerateStringAdd(ZoneList<Expression*>* args);
-
- // Fast support for SubString.
- void GenerateSubString(ZoneList<Expression*>* args);
-
- // Fast support for StringCompare.
- void GenerateStringCompare(ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements. Takes three expressions, the object and two
- // indices. This should only be used if the indices are known to be
- // non-negative and within bounds of the elements array at the call site.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- // Check whether two RegExps are equivalent.
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
- void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should
- // be no frame-external references to (non-reserved) registers.
- bool HasValidEntryRegisters();
-#endif
-
- ZoneList<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- CodeGenState* state_;
- int loop_nesting_;
-
- // Jump targets.
- // The target of the return from the function.
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- // True when we are in code that expects the virtual frame to be fully
- // spilled. Some virtual frame function are disabled in DEBUG builds when
- // called from spilled code, because they do not leave the virtual frame
- // in a spilled state.
- bool in_spilled_code_;
-
- friend class VirtualFrame;
- friend class JumpTarget;
- friend class Reference;
- friend class Result;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
-
- friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
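
The block removed above is the declaration of the old classic (pre-Crankshaft) x64 CodeGenerator; the whole class goes away in this change. One idea it documents is worth keeping in mind: per the kMaxSmiInlinedBits comment, integer constants wider than 16 bits were loaded by LoadUnsafeSmi in two pieces so that no single emitted operation embeds more than 16 attacker-controlled bits. A minimal, self-contained C++ sketch of that splitting idea follows; the helper name is hypothetical, and the real code emits machine instructions rather than C++:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for LoadUnsafeSmi: the constant is materialized in
// two 16-bit halves instead of as one wide immediate.
static uint32_t LoadInTwoParts(uint32_t user_constant) {
  uint32_t high = user_constant >> 16;      // first step: upper 16 bits only
  uint32_t low = user_constant & 0xFFFFu;   // second step: lower 16 bits only
  return (high << 16) | low;                // recombined at run time
}

int main() {
  std::printf("%#x\n", LoadInTwoParts(0x12345678u));  // prints 0x12345678
  return 0;
}
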
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 3ff292e82b..e637ba124d 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -42,10 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
- CpuFeatures::Probe(true);
- if (Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return true; // Yay!
}
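
CPU::Setup() now calls the argumentless CpuFeatures::Probe(), and Crankshaft support is reported unconditionally on x64 instead of being switched off when the serializer is enabled. Probing amounts to reading the host CPU's feature bits via CPUID. A minimal GCC/Clang-only sketch of such a probe, assuming the standard CPUID.01H:EDX layout (bit 26 advertises SSE2); the function name is illustrative, not V8's:

#include <cpuid.h>
#include <cstdio>

static bool ProbeSSE2() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;  // leaf 1
  return (edx & (1u << 26)) != 0;  // EDX bit 26 == SSE2
}

int main() {
  std::printf("SSE2 supported: %s\n", ProbeSSE2() ? "yes" : "no");
  return 0;
}
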
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 2c50ddd14e..423e6f2441 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -29,7 +29,8 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "codegen-inl.h"
+#include "assembler.h"
+#include "codegen.h"
#include "debug.h"
@@ -49,7 +50,8 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
+ rinfo()->PatchCodeWithCall(
+ Isolate::Current()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -79,7 +81,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Debug::debug_break_slot()->entry(),
+ Isolate::Current()->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
@@ -128,7 +130,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break());
+ __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
@@ -167,7 +169,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget());
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ movq(kScratchRegister, after_break_target);
__ jmp(Operand(kScratchRegister, 0));
}
@@ -283,7 +285,8 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
+ ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
+ masm->isolate());
__ movq(rax, restarter_frame_function_slot);
__ movq(Operand(rax, 0), Immediate(0));
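
Every hunk in debug-x64.cc applies the same isolate-threading pattern: accessors that used to be process-global statics (Debug::debug_break_return(), ExternalReference::debug_break(), the Debug_Address references) now receive the owning isolate from Isolate::Current() or masm->isolate(). A small, entirely hypothetical C++ sketch of that refactoring shape, using simplified stand-in types rather than V8's:

#include <cstdio>

struct Debug {
  const char* debug_break_return_entry() const { return "break-return stub"; }
};

struct Isolate {
  Debug* debug() { return &debug_; }
 private:
  Debug debug_;
};

// Before: the patcher reached for a global Debug singleton.
// After: the isolate owning the Debug object is passed in explicitly.
static void SetDebugBreakAtReturn(Isolate* isolate) {
  std::printf("patching call to %s\n",
              isolate->debug()->debug_break_return_entry());
}

int main() {
  Isolate isolate;
  SetDebugBreakAtReturn(&isolate);
  return 0;
}
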
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 61f4381108..abac2b6b38 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -107,6 +107,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@@ -191,8 +192,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- node->set_next(deoptimizing_code_list_);
- deoptimizing_code_list_ = node;
+ DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ node->set_next(data->deoptimizing_code_list_);
+ data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -201,6 +203,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
@@ -354,13 +361,32 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
+ "(fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
@@ -387,7 +413,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
- Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ Code* continuation =
+ function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
@@ -559,8 +586,8 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = (bailout_type_ == EAGER)
- ? Builtins::builtin(Builtins::NotifyDeoptimized)
- : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
+ : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
@@ -573,7 +600,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -636,21 +662,26 @@ void Deoptimizer::EntryGenerator::Generate() {
__ neg(arg5);
// Allocate a new deoptimizer object.
- __ PrepareCallCFunction(5);
+ __ PrepareCallCFunction(6);
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(arg1, rax);
- __ movq(arg2, Immediate(type()));
+ __ Set(arg2, type());
// Args 3 and 4 are already in the right registers.
- // On windows put the argument on the stack (PrepareCallCFunction have
- // created space for this). On linux pass the argument in r8.
+ // On windows put the arguments on the stack (PrepareCallCFunction
+ // has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
- __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+ __ LoadAddress(arg5, ExternalReference::isolate_address());
+ __ movq(Operand(rsp, 5 * kPointerSize), arg5);
#else
__ movq(r8, arg5);
+ __ LoadAddress(r9, ExternalReference::isolate_address());
#endif
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ Isolate* isolate = masm()->isolate();
+
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -693,9 +724,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Compute the output frame in the deoptimizer.
__ push(rax);
- __ PrepareCallCFunction(1);
+ __ PrepareCallCFunction(2);
__ movq(arg1, rax);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ LoadAddress(arg2, ExternalReference::isolate_address());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 2);
__ pop(rax);
// Replace the current frame with the output frames.
@@ -753,12 +786,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(r13, roots_address);
-
- __ movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ __ InitializeRootRegister();
+ __ InitializeSmiConstantRegister();
// Return to the continuation point.
__ ret(0);
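
The #ifdef _WIN64 block above exists because the two x64 C calling conventions place a six-argument call differently: Microsoft x64 passes only four integer arguments in registers (rcx, rdx, r8, r9) and expects the fifth and sixth in stack slots above the caller-reserved 32-byte shadow space, which is exactly the rsp + 4/5 * kPointerSize stores here, while System V (Linux) uses rdi, rsi, rdx, rcx, r8, r9. A self-contained illustration with a hypothetical six-argument function; the register mapping lives in the comments, the compiler does the actual placement:

#include <cstdint>
#include <cstdio>

// System V AMD64:  a1..a6 -> rdi, rsi, rdx, rcx, r8, r9.
// Microsoft x64:   a1..a4 -> rcx, rdx, r8, r9;  a5, a6 -> stack slots that
//                  follow the 32-byte shadow space reserved by the caller.
extern "C" std::int64_t SixArgCall(std::int64_t a1, std::int64_t a2,
                                   std::int64_t a3, std::int64_t a4,
                                   std::int64_t a5, std::int64_t a6) {
  return a1 + a2 + a3 + a4 + a5 + a6;
}

int main() {
  std::printf("%lld\n",
              static_cast<long long>(SixArgCall(1, 2, 3, 4, 5, 6)));  // 21
  return 0;
}
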
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 21a100f59a..14c95bc5ac 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -269,6 +269,7 @@ void InstructionTable::AddJumpConditionalShort() {
static InstructionTable instruction_table;
+
static InstructionDesc cmov_instructions[16] = {
{"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
@@ -451,9 +452,11 @@ void DisassemblerX64::AppendToBuffer(const char* format, ...) {
int DisassemblerX64::PrintRightOperandHelper(
byte* modrmp,
- RegisterNameMapping register_name) {
+ RegisterNameMapping direct_register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
+ RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+ &DisassemblerX64::NameOfCPURegister;
switch (mod) {
case 0:
if ((rm & 7) == 5) {
@@ -649,6 +652,9 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
case 2:
mnem = "adc";
break;
+ case 3:
+ mnem = "sbb";
+ break;
case 4:
mnem = "and";
break;
@@ -1015,12 +1021,26 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x0b) {
+ get_modrm(*current, &mod, &regop, &rm);
+ // roundsd xmm, xmm/m64, imm8
+ AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
} else {
UnimplementedInstruction();
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x6E) {
+ if (opcode == 0x28) {
+ AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x29) {
+ AppendToBuffer("movapd ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x6E) {
AppendToBuffer("mov%c %s,",
rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
@@ -1028,7 +1048,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x6F) {
AppendToBuffer("movdqa %s,",
NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
@@ -1036,13 +1056,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
AppendToBuffer("movdqa ");
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0xD6) {
+ AppendToBuffer("movq ");
+ current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x50) {
+ AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
const char* mnemonic = "?";
- if (opcode == 0x50) {
- mnemonic = "movmskpd";
- } else if (opcode == 0x54) {
+ if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
@@ -1068,11 +1093,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
if (opcode == 0x11) {
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
}
} else if (opcode == 0x2A) {
// CVTSI2SD: integer to XMM double conversion.
@@ -1139,6 +1164,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x7E) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1156,6 +1186,22 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 4;
} // else no immediate displacement.
AppendToBuffer("nop");
+
+ } else if (opcode == 0x28) {
+ // movaps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x29) {
+ // movaps xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movaps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
} else if (opcode == 0xA2 || opcode == 0x31) {
// RDTSC or CPUID
AppendToBuffer("%s", mnemonic);
@@ -1167,6 +1213,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+ } else if (opcode == 0x57) {
+ // xorps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1435,19 +1488,26 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
{
bool is_byte = *data == 0xC6;
data++;
-
- AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
- data += PrintRightOperand(data);
- int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += is_byte ? 1 : 4;
+ if (is_byte) {
+ AppendToBuffer("movb ");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ AppendToBuffer("mov%c ", operand_size_code());
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
}
break;
case 0x80: {
data++;
AppendToBuffer("cmpb ");
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
@@ -1461,9 +1521,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
data++;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
+ if (is_byte) {
+ AppendToBuffer("movb ");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("mov%c ", operand_size_code());
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
}
break;
@@ -1486,20 +1552,51 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
}
break;
-
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ case 0xB8:
+ case 0xB9:
+ case 0xBA:
+ case 0xBB:
+ case 0xBC:
+ case 0xBD:
+ case 0xBE:
+ case 0xBF: {
+ // mov reg8,imm8 or mov reg32,imm32
+ byte opcode = *data;
+ data++;
+ bool is_32bit = (opcode >= 0xB8);
+ int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
+ if (is_32bit) {
+ AppendToBuffer("mov%c %s, ",
+ operand_size_code(),
+ NameOfCPURegister(reg));
+ data += PrintImmediate(data, DOUBLEWORD_SIZE);
+ } else {
+ AppendToBuffer("movb %s, ",
+ NameOfByteCPURegister(reg));
+ data += PrintImmediate(data, BYTE_SIZE);
+ }
+ break;
+ }
case 0xFE: {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (regop == 1) {
AppendToBuffer("decb ");
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
} else {
UnimplementedInstruction();
}
- }
break;
-
+ }
case 0x68:
AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
data += 5;
@@ -1652,9 +1749,8 @@ static const char* xmm_regs[16] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
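
The new opcode cases above all start from get_modrm(), so it helps to recall the encoding it decodes: a ModRM byte splits into mod (bits 7..6), reg/opcode (bits 5..3) and rm (bits 2..0), and mod == 3 means register-direct operands, which is the case the PrintRightOperandHelper change keys on. A standalone sketch with a hypothetical decoder name:

#include <cstdint>
#include <cstdio>

static void DecodeModRM(uint8_t modrm, int* mod, int* regop, int* rm) {
  *mod = (modrm >> 6) & 0x3;
  *regop = (modrm >> 3) & 0x7;  // extended by REX.R in the real disassembler
  *rm = modrm & 0x7;            // extended by REX.B in the real disassembler
}

int main() {
  int mod, regop, rm;
  DecodeModRM(0xC1, &mod, &regop, &rm);  // 0xC1: mod=3 (register), reg=0, rm=1
  std::printf("mod=%d reg=%d rm=%d\n", mod, regop, rm);
  return 0;
}
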
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 81be819196..b14267c82d 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -99,7 +99,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 60b77b5bfe..4283f1be69 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
@@ -44,6 +44,12 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+static unsigned GetPropertyId(Property* property) {
+ if (property->is_synthetic()) return AstNode::kNoNumber;
+ return property->id();
+}
+
+
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm)
@@ -57,14 +63,18 @@ class JumpPatchSite BASE_EMBEDDED {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
- void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ void EmitJumpIfNotSmi(Register reg,
+ Label* target,
+ Label::Distance near_jump = Label::kFar) {
__ testb(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target); // Always taken before patched.
+ EmitJump(not_carry, target, near_jump); // Always taken before patched.
}
- void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ void EmitJumpIfSmi(Register reg,
+ Label* target,
+ Label::Distance near_jump = Label::kFar) {
__ testb(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target); // Never taken before patched.
+ EmitJump(carry, target, near_jump); // Never taken before patched.
}
void EmitPatchInfo() {
@@ -80,11 +90,11 @@ class JumpPatchSite BASE_EMBEDDED {
private:
// jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, NearLabel* target) {
+ void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
ASSERT(cc == carry || cc == not_carry);
__ bind(&patch_site_);
- __ j(cc, target);
+ __ j(cc, target, near_jump);
}
MacroAssembler* masm_;
@@ -120,6 +130,22 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ int3();
}
#endif
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). rcx is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
+
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -149,7 +175,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both rax and rsi. It replaces the context
@@ -198,13 +224,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ ArgumentsAccessStub stub(
+ is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
+ : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
__ CallStub(&stub);
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
+
Move(arguments->AsSlot(), rax, rbx, rdx);
- Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
- Move(dot_arguments_slot, rcx, rbx, rdx);
}
if (FLAG_trace) {
@@ -227,10 +252,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -259,9 +284,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
- NearLabel ok;
+ Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
@@ -346,7 +371,7 @@ void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -379,7 +404,7 @@ void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -424,7 +449,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
} else {
// For simplicity we always test the accumulator register.
__ Move(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -460,7 +485,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count,
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -474,12 +499,12 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
void FullCodeGenerator::AccumulatorValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
- NearLabel done;
+ Label done;
__ bind(materialize_true);
- __ Move(result_register(), Factory::true_value());
- __ jmp(&done);
+ __ Move(result_register(), isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(materialize_false);
- __ Move(result_register(), Factory::false_value());
+ __ Move(result_register(), isolate()->factory()->false_value());
__ bind(&done);
}
@@ -487,12 +512,12 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
- NearLabel done;
+ Label done;
__ bind(materialize_true);
- __ Push(Factory::true_value());
- __ jmp(&done);
+ __ Push(isolate()->factory()->true_value());
+ __ jmp(&done, Label::kNear);
__ bind(materialize_false);
- __ Push(Factory::false_value());
+ __ Push(isolate()->factory()->false_value());
__ bind(&done);
}
@@ -535,28 +560,14 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
- // Emit the inlined tests assumed by the stub.
- __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
- __ j(equal, if_false);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
- __ j(equal, if_false);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiCompare(result_register(), Smi::FromInt(0));
- __ j(equal, if_false);
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, if_true);
-
- // Call the ToBoolean stub for all other cases.
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ testq(rax, rax);
-
+ __ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -627,8 +638,8 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
// preparation to avoid preparing with the same AST id twice.
if (!context()->IsTest() || !info_->IsOptimizable()) return;
- NearLabel skip;
- if (should_normalize) __ jmp(&skip);
+ Label skip;
+ if (should_normalize) __ jmp(&skip, Label::kNear);
ForwardBailoutStack* current = forward_bailout_stack_;
while (current != NULL) {
@@ -669,13 +680,16 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ movq(rbx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ cmpq(rbx, rsi);
- __ Check(equal, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+ __ Check(not_equal, "Declaration in with context.");
+ __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+ __ Check(not_equal, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
@@ -714,31 +728,28 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
}
} else if (prop != NULL) {
- if (function != NULL || mode == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value. We
- // cannot visit the rewrite because it's shared and we risk
- // recording duplicate AST IDs for bailouts from optimized code.
+ // A const declaration aliasing a parameter is an illegal redeclaration.
+ ASSERT(mode != Variable::CONST);
+ if (function != NULL) {
+ // We are declaring a function that rewrites to a property.
+ // Use (keyed) IC to set the initial value. We cannot visit the
+ // rewrite because it's shared and we risk recording duplicate AST
+ // IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
- if (function != NULL) {
- __ push(rax);
- VisitForAccumulatorValue(function);
- __ pop(rdx);
- } else {
- __ movq(rdx, rax);
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- }
+ __ push(rax);
+ VisitForAccumulatorValue(function);
+ __ pop(rdx);
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
}
}
}
@@ -776,7 +787,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
- clause->body_target()->entry_label()->Unuse();
+ clause->body_target()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
@@ -796,27 +807,27 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- NearLabel slow_case;
+ Label slow_case;
__ movq(rcx, rdx);
__ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
__ bind(&slow_case);
}
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, clause->CompareId());
__ testq(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ __ jmp(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
@@ -826,14 +837,15 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (default_clause == NULL) {
__ jmp(nested_statement.break_target());
} else {
- __ jmp(default_clause->body_target()->entry_label());
+ __ jmp(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target()->entry_label());
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
@@ -864,7 +876,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
__ push(rax);
@@ -895,9 +907,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// check for an enum cache. Leave the map in rbx for the subsequent
// prototype load.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
- __ cmpq(rdx, empty_descriptor_array_value);
- __ j(equal, &call_runtime);
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
+ __ JumpIfSmi(rdx, &call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (rdx). This is the case if the next enumeration
@@ -906,9 +917,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ JumpIfSmi(rdx, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
- NearLabel check_prototype;
+ Label check_prototype;
__ cmpq(rcx, rax);
- __ j(equal, &check_prototype);
+ __ j(equal, &check_prototype, Label::kNear);
__ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmpq(rdx, empty_fixed_array_value);
__ j(not_equal, &call_runtime);
@@ -921,9 +932,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
- NearLabel use_cache;
+ Label use_cache;
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache);
+ __ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
@@ -933,14 +944,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
- NearLabel fixed_array;
+ Label fixed_array;
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array);
+ __ j(not_equal, &fixed_array, Label::kNear);
// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
- __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
+ __ LoadInstanceDescriptors(rax, rcx);
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -980,10 +991,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
- NearLabel update_each;
+ Label update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ j(equal, &update_each);
+ __ j(equal, &update_each, Label::kNear);
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
@@ -991,7 +1002,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rcx); // Enumerable.
__ push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ SmiCompare(rax, Smi::FromInt(0));
+ __ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_target());
__ movq(rbx, rax);
@@ -1035,16 +1046,18 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
+ !pretenure &&
scope()->is_function_scope() &&
- info->num_literals() == 0 &&
- !pretenure) {
- FastNewClosureStub stub;
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
__ Push(info);
__ CallStub(&stub);
} else {
__ push(rsi);
__ Push(info);
- __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ __ Push(pretenure
+ ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value());
__ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(rax);
@@ -1074,8 +1087,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1089,7 +1101,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
- NearLabel next, fast;
+ Label next, fast;
if (!context.is(temp)) {
__ movq(temp, context);
}
@@ -1098,13 +1110,12 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
__ bind(&next);
// Terminate at global context.
__ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
- __ j(equal, &fast);
+ __ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
__ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
@@ -1113,11 +1124,11 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
// load IC call.
__ movq(rax, GlobalObjectOperand());
__ Move(rcx, slot->var()->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, mode, AstNode::kNoNumber);
}
@@ -1136,8 +1147,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1196,8 +1206,9 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
__ Move(rax, key_literal->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic =
+ isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1207,20 +1218,19 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
- if (var->is_global() && !var->is_this()) {
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
context()->Plug(rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1239,52 +1249,24 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
context()->Plug(rax);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
- NearLabel done;
+ Label done;
MemOperand slot_operand = EmitSlotSearch(slot, rax);
__ movq(rax, slot_operand);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ bind(&done);
context()->Plug(rax);
} else {
context()->Plug(slot);
}
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ movq(rdx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ Move(rax, key_literal->handle());
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
- context()->Plug(rax);
}
}
@@ -1349,7 +1331,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_properties());
- __ Push(Smi::FromInt(expr->fast_elements() ? 1 : 0));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Push(Smi::FromInt(flags));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
@@ -1387,10 +1375,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
__ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1422,6 +1410,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ push(Operand(rsp, 0));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1440,11 +1434,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_elements());
- if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
__ CallStub(&stub);
- __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
} else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -1504,7 +1499,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1530,55 +1525,38 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Push(property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -1589,13 +1567,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr,
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
mode,
expr->target(),
expr->value());
} else {
- EmitBinaryOp(op, mode);
+ EmitBinaryOp(expr->binary_operation(), op, mode);
}
// Deoptimization point in case the binary operation may have side effects.
PrepareForBailout(expr->binary_operation(), TOS_REG);
@@ -1628,19 +1606,19 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
@@ -1648,18 +1626,18 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
- NearLabel done, stub_call, smi_case;
+ Label done, stub_call, smi_case;
__ pop(rdx);
__ movq(rcx, rax);
__ or_(rax, rdx);
JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(rax, &smi_case);
+ patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ movq(rax, rcx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site);
- __ jmp(&done);
+ BinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ jmp(&done, Label::kNear);
__ bind(&smi_case);
switch (op) {
@@ -1700,11 +1678,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
OverwriteMode mode) {
__ pop(rdx);
- TypeRecordingBinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
+ BinaryOpStub stub(op, mode);
+ // NULL signals no inlined smi code.
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
context()->Plug(rax);
}
@@ -1718,7 +1698,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1741,33 +1721,23 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ movq(rdx, rax);
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rcx, rax);
+ __ pop(rdx);
__ pop(rax); // Restore value.
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
break;
}
}
@@ -1778,8 +1748,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1790,10 +1758,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// rcx, and the global object on the stack.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1813,17 +1781,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ j(not_equal, &skip);
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
- case Slot::CONTEXT: {
- __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ movq(rdx, ContextOperand(rcx, slot->index()));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(ContextOperand(rcx, slot->index()), rax);
- int offset = Context::SlotOffset(slot->index());
- __ movq(rdx, rax); // Preserve the stored value in eax.
- __ RecordWrite(rcx, offset, rdx, rbx);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(rax);
__ push(rsi);
@@ -1893,10 +1851,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} else {
__ pop(rdx);
}
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1933,10 +1891,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1986,8 +1944,9 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, mode);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1996,8 +1955,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key,
- RelocInfo::Mode mode) {
+ Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
@@ -2019,9 +1977,10 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
SetSourcePosition(expr->position());
// Call the IC initialization code.
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, mode);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2029,7 +1988,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2041,7 +2000,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, flags);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2132,7 +2091,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2170,18 +2129,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
- NearLabel call;
- __ jmp(&call);
+ Label call;
+ __ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
__ push(rax);
- // Push global receiver.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- __ bind(&call);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ bind(&call);
}
- EmitCallWithStub(expr);
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot. That object could be the hole if the
+ // receiver is implicitly the global object.
+ EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
} else if (fun->AsProperty() != NULL) {
// Call to an object property.
Property* prop = fun->AsProperty();
@@ -2212,31 +2174,22 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source code position for IC call.
SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(rax);
// Push Global receiver.
__ movq(rcx, GlobalObjectOperand());
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
} else {
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(prop->obj());
}
- EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+ EmitKeyedCallWithIC(expr, prop->key());
}
}
} else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the full code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_full_codegen(true);
- }
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(fun);
}
@@ -2244,7 +2197,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ movq(rbx, GlobalObjectOperand());
__ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Emit function call.
- EmitCallWithStub(expr);
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
}
#ifdef DEBUG
@@ -2280,7 +2233,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Set(rax, arg_count);
__ movq(rdi, Operand(rsp, arg_count * kPointerSize));
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> construct_builtin =
+ isolate()->builtins()->JSConstructCall();
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
context()->Plug(rax);
}
@@ -2347,9 +2301,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2370,7 +2324,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -2414,11 +2368,71 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
- // used in a few functions in runtime.js which should not normally be hit by
- // this compiler.
+ if (FLAG_debug_code) __ AbortIfSmi(rax);
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ j(not_zero, if_true);
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
+ __ j(equal, if_false);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ LoadInstanceDescriptors(rbx, rbx);
+ __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ // rbx: descriptor array
+ // rcx: length of descriptor array
+ // Calculate the end of the descriptor array.
+ SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
+ __ lea(rcx,
+ Operand(
+ rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ addq(rbx,
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf, the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(rdx, FieldOperand(rbx, 0));
+ __ Cmp(rdx, FACTORY->value_of_symbol());
+ __ j(equal, if_false);
+ __ addq(rbx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rbx, rcx);
+ __ j(not_equal, &loop);
+
+ // Reload map as register rbx was used as temporary above.
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ testq(rcx, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ cmpq(rcx,
+ ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, if_false);
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf, and set the result to true.
+ __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ jmp(if_true);
+
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ jmp(if_false);
context()->Plug(if_true, if_false);
}
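
The hunk above inlines the full %_IsStringWrapperSafeForDefaultValueOf check instead of always answering false. A minimal C++ sketch of the same control flow, over stand-in types that are assumptions for illustration only (the real code also folds a smi check on the prototype into the map comparison):

#include <string>
#include <vector>

// Stand-in for the handful of map/object facts the generated code inspects.
struct ObjectModel {
  bool safe_bit_cached;                      // Map::kStringWrapperSafeForDefaultValueOf
  bool has_dictionary_properties;            // properties map == hash table map
  std::vector<std::string> descriptor_keys;  // keys in the map's descriptor array
  bool prototype_is_unmodified_string_prototype;
};

bool IsStringWrapperSafeForDefaultValueOf(ObjectModel* obj) {
  if (obj->safe_bit_cached) return true;             // fast path: already verified
  if (obj->has_dictionary_properties) return false;  // slow-case object => false
  for (const std::string& key : obj->descriptor_keys) {
    if (key == "valueOf") return false;              // own valueOf shadows the default
  }
  if (!obj->prototype_is_unmodified_string_prototype) return false;
  obj->safe_bit_cached = true;                       // remember the positive answer
  return true;
}
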
@@ -2502,15 +2516,15 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
- __ SmiCompare(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
__ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
- __ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2558,15 +2572,15 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- NearLabel exit;
+ Label exit;
// Get the number of formal parameters.
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit);
+ __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &exit, Label::kNear);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
@@ -2589,16 +2603,18 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); // Map is now in rax.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
+ // Map is now in rax.
__ j(below, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
@@ -2613,12 +2629,12 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, Factory::function_class_symbol());
+ __ Move(rax, isolate()->factory()->function_class_symbol());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, Factory::Object_symbol());
+ __ Move(rax, isolate()->factory()->Object_symbol());
__ jmp(&done);
// Non-JS objects have class null.
@@ -2672,8 +2688,13 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
// Return a random uint32 number in rax.
// The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+ __ PrepareCallCFunction(1);
+#ifdef _WIN64
+ __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+ __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Convert 32 random bits in rax to 0.(32 random bits) in a double
// by computing:
@@ -2682,7 +2703,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
__ movd(xmm1, rcx);
__ movd(xmm0, rax);
__ cvtss2sd(xmm1, xmm1);
- __ xorpd(xmm0, xmm1);
+ __ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
__ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
@@ -2967,17 +2988,17 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // For receiver and function.
- VisitForStackValue(args->at(0)); // Receiver.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i + 1));
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
}
- VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
+ VisitForAccumulatorValue(args->last()); // Function.
- // InvokeFunction requires function in rdi. Move it in there.
- if (!result_register().is(rdi)) __ movq(rdi, result_register());
+ // InvokeFunction requires the function in rdi. Move it in there.
+ __ movq(rdi, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION);
+ __ InvokeFunction(rdi, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
@@ -3010,8 +3031,8 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
- __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
- __ j(below, &slow_case);
+ __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
+ __ j(not_equal, &slow_case);
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
__ j(not_zero, &slow_case);
@@ -3077,7 +3098,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
- Top::global_context()->jsfunction_result_caches());
+ isolate()->global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3098,7 +3119,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ movq(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
- NearLabel done, not_found;
+ Label done, not_found;
// tmp now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
@@ -3108,12 +3129,12 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
index.reg,
index.scale,
FixedArray::kHeaderSize));
- __ j(not_equal, &not_found);
+ __ j(not_equal, &not_found, Label::kNear);
__ movq(rax, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize + kPointerSize));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&not_found);
// Call runtime to perform the lookup.
@@ -3137,27 +3158,27 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(1));
__ pop(left);
- NearLabel done, fail, ok;
+ Label done, fail, ok;
__ cmpq(left, right);
- __ j(equal, &ok);
+ __ j(equal, &ok, Label::kNear);
// Fail if either is a non-HeapObject.
Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail);
- __ j(zero, &fail);
+ __ j(either_smi, &fail, Label::kNear);
+ __ j(zero, &fail, Label::kNear);
__ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail);
+ __ j(not_equal, &fail, Label::kNear);
__ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
+ __ j(not_equal, &fail, Label::kNear);
__ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
__ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
+ __ j(equal, &ok, Label::kNear);
__ bind(&fail);
- __ Move(rax, Factory::false_value());
- __ jmp(&done);
+ __ Move(rax, isolate()->factory()->false_value());
+ __ jmp(&done, Label::kNear);
__ bind(&ok);
- __ Move(rax, Factory::true_value());
+ __ Move(rax, isolate()->factory()->true_value());
__ bind(&done);
context()->Plug(rax);
@@ -3203,7 +3224,286 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- context()->Plug(Heap::kUndefinedValueRootIndex);
+ Label bailout, return_result, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+ ASSERT(args->length() == 2);
+ // We will leave the separator on the stack until the end of the function.
+ VisitForStackValue(args->at(1));
+ // Load this to rax (= array)
+ VisitForAccumulatorValue(args->at(0));
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = rax;
+ Register elements = no_reg; // Will be rax.
+
+ Register index = rdx;
+
+ Register string_length = rcx;
+
+ Register string = rsi;
+
+ Register scratch = rbx;
+
+ Register array_length = rdi;
+ Register result_pos = no_reg; // Will be rdi.
+
+ Operand separator_operand = Operand(rsp, 2 * kPointerSize);
+ Operand result_operand = Operand(rsp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
+ // Separator operand is already pushed. Make room for the two
+ // other stack fields, and clear the direction flag in anticipation
+ // of calling CopyBytes.
+ __ subq(rsp, Immediate(2 * kPointerSize));
+ __ cld();
+ // Check that the array is a JSArray
+ __ JumpIfSmi(array, &bailout);
+ __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch, &bailout);
+
+ // Array has fast elements, so its length must be a smi.
+ // If the array has length zero, return the empty string.
+ __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ SmiCompare(array_length, Smi::FromInt(0));
+ __ j(not_zero, &non_trivial_array);
+ __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ jmp(&return_result);
+
+ // Save the array length on the stack.
+ __ bind(&non_trivial_array);
+ __ SmiToInteger32(array_length, array_length);
+ __ movl(array_length_operand, array_length);
+
+ // Save the FixedArray containing array's elements.
+ // End of array's live range.
+ elements = array;
+ __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
+ array = no_reg;
+
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Set(index, 0);
+ __ Set(string_length, 0);
+ // Loop condition: while (index < array_length).
+ // Live loop registers: index(int32), array_length(int32), string(String*),
+ // scratch, string_length(int32), elements(FixedArray*).
+ if (FLAG_debug_code) {
+ __ cmpq(index, array_length);
+ __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
+ }
+ __ bind(&loop);
+ __ movq(string, FieldOperand(elements,
+ index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(string, &bailout);
+ __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ andb(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ j(not_equal, &bailout);
+ __ AddSmiField(string_length,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ incl(index);
+ __ cmpl(index, array_length);
+ __ j(less, &loop);
+
+ // Live registers:
+ // string_length: Sum of string lengths.
+ // elements: FixedArray of strings.
+ // index: Array length.
+ // array_length: Array length.
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmpl(array_length, Immediate(1));
+ __ j(not_equal, &not_size_one_array);
+ __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ jmp(&return_result);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths.
+ // elements: FixedArray of strings.
+ // index: Array length.
+
+ // Check that the separator is a sequential ASCII string.
+ __ movq(string, separator_operand);
+ __ JumpIfSmi(string, &bailout);
+ __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ andb(scratch, Immediate(
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
+ __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ j(not_equal, &bailout);
+
+ // Live registers:
+ // string_length: Sum of string lengths.
+ // elements: FixedArray of strings.
+ // index: Array length.
+ // string: Separator string.
+
+ // Add (separator length times (array_length - 1)) to string_length.
+ __ SmiToInteger32(scratch,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ decl(index);
+ __ imull(scratch, index);
+ __ j(overflow, &bailout);
+ __ addl(string_length, scratch);
+ __ j(overflow, &bailout);
+
+ // Live registers and stack values:
+ // string_length: Total length of result string.
+ // elements: FixedArray of strings.
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ movq(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+ __ movq(string, separator_operand);
+ __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ Smi::FromInt(1));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case:
+ __ Set(index, 0);
+ __ movl(scratch, array_length_operand);
+ __ jmp(&loop_1_condition);
+ // Loop condition: while (index < array_length).
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+ // scratch: array length.
+
+ // Get string = array[index].
+ __ movq(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(result_pos, string, string_length);
+ __ incl(index);
+ __ bind(&loop_1_condition);
+ __ cmpl(index, scratch);
+ __ j(less, &loop_1); // Loop while (index < array_length).
+ __ jmp(&done);
+
+ // Generic bailout code used from several places.
+ __ bind(&bailout);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ jmp(&return_result);
+
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Get the separator ASCII character value.
+ // Register "string" holds the separator.
+ __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ Set(index, 0);
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // elements: The FixedArray of strings we are joining.
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // scratch: Separator character.
+
+ // Copy the separator character to the result.
+ __ movb(Operand(result_pos, 0), scratch);
+ __ incq(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ movq(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ SmiToInteger32(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(result_pos, string, string_length);
+ __ incl(index);
+ __ cmpl(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ // Make elements point to end of elements array, and index
+ // count from -array_length to zero, so we don't need to maintain
+ // a loop limit.
+ __ movl(index, array_length_operand);
+ __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ neg(index);
+
+ // Replace separator string with pointer to its first character, and
+ // make scratch be its length.
+ __ movq(string, separator_operand);
+ __ SmiToInteger32(scratch,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ movq(separator_operand, string);
+
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // scratch: Separator length.
+ // separator_operand (rsp[0x10]): Address of first char of separator.
+
+ // Copy the separator to the result.
+ __ movq(string, separator_operand);
+ __ movl(string_length, scratch);
+ __ CopyBytes(result_pos, string, string_length, 2);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ movq(string, Operand(elements, index, times_pointer_size, 0));
+ __ SmiToInteger32(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(result_pos, string, string_length);
+ __ incq(index);
+ __ j(not_equal, &loop_3); // Loop while (index < 0).
+
+ __ bind(&done);
+ __ movq(rax, result_operand);
+
+ __ bind(&return_result);
+ // Drop temp values from the stack, and restore context register.
+ __ addq(rsp, Immediate(3 * kPointerSize));
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->Plug(rax);
}
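
The replacement above generates a real fast path for Array.prototype.join on flat ASCII strings rather than bailing straight to undefined. A rough C++ sketch of its overall shape, with std::string standing in for sequential ASCII strings (the generated code additionally bails out on non-string elements, smi overflow, and non-flat separators):

#include <string>
#include <vector>

std::string FastAsciiArrayJoin(const std::vector<std::string>& elements,
                               const std::string& separator) {
  if (elements.empty()) return std::string();    // empty array => empty string
  if (elements.size() == 1) return elements[0];  // single element => that string
  // First pass: total length = sum of element lengths + separators between them.
  size_t length = separator.size() * (elements.size() - 1);
  for (const std::string& s : elements) length += s.size();
  // Second pass: allocate once, then copy elements and separators in order,
  // mirroring the empty / one-char / long separator loops above.
  std::string result;
  result.reserve(length);
  for (size_t i = 0; i < elements.size(); ++i) {
    if (i > 0) result += separator;
    result += elements[i];
  }
  return result;
}
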
@@ -3234,8 +3534,10 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the JS runtime function using a call IC.
__ Move(rcx, expr->name());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ EmitCallIC(ic, mode, expr->id());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3338,8 +3640,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3347,46 +3648,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(rax);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- Label call_stub;
- __ JumpIfNotSmi(rax, &call_stub);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(rax);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
@@ -3394,6 +3662,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ UnaryOpStub stub(expr->op(), overwrite);
+ // UnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ EmitCallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -3406,7 +3691,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3432,16 +3717,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
@@ -3450,13 +3727,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
- NearLabel no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &no_conversion);
+ Label no_conversion;
+ __ JumpIfSmi(rax, &no_conversion, Label::kNear);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3482,7 +3761,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
- NearLabel stub_call, done;
+ Label done, stub_call;
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
@@ -3491,10 +3770,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
__ SmiSubConstant(rax, rax, Smi::FromInt(1));
}
- __ j(overflow, &stub_call);
+ __ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done);
+ patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
__ bind(&stub_call);
// Call stub. Undo operation first.
@@ -3509,14 +3788,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
if (expr->op() == Token::INC) {
__ Move(rdx, Smi::FromInt(1));
} else {
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- EmitCallIC(stub.GetCode(), &patch_site);
+ EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
__ bind(&done);
// Store the value returned in rax.
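
For reference, the inline smi path emitted above amounts to a tagged add/sub of 1 with an overflow check; on overflow the generated code undoes the operation and falls back to BinaryOpStub. A hedged sketch, assuming the x64 smi tagging (value held in the upper 32 bits) and using a GCC/Clang builtin for the overflow check; SmiFromInt here is a stand-in, not V8's API:

#include <cstdint>

// Stand-in for Smi::FromInt on x64, where a smi keeps its value in the
// upper 32 bits of the word.
static int64_t SmiFromInt(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}

// Returns true and writes the new smi on success; returns false on overflow,
// where the real code reverts the operation and calls the generic BinaryOpStub.
bool TryInlineCountOperation(int64_t smi, bool is_increment, int64_t* out) {
  return is_increment
      ? !__builtin_add_overflow(smi, SmiFromInt(1), out)
      : !__builtin_sub_overflow(smi, SmiFromInt(1), out);
}
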
@@ -3546,10 +3825,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3563,10 +3842,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3590,10 +3869,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
__ movq(rax, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL &&
@@ -3616,94 +3895,80 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_true);
+ if (check->Equals(isolate()->heap()->number_symbol())) {
+ __ JumpIfSmi(rax, if_true);
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ __ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, if_false);
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ CmpInstanceType(rdx, FIRST_NONSTRING_TYPE);
- Split(below, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::boolean_symbol())) {
+ Split(zero, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(rdx, JS_REGEXP_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ __ JumpIfSmi(rax, if_false);
+ STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
+ Split(above_equal, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ __ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rdx);
- __ j(equal, if_false);
+ __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
+ __ j(below, if_false);
+ __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
// Check for undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- // Check for JS objects => true.
- __ CmpInstanceType(rdx, FIRST_JS_OBJECT_TYPE);
- __ j(below, if_false);
- __ CmpInstanceType(rdx, LAST_JS_OBJECT_TYPE);
- Split(below_equal, if_true, if_false, fall_through);
+ Split(zero, if_true, if_false, fall_through);
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- return true;
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
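
The rewritten typeof comparison above maps each string literal onto a small set of tag and map-bit checks. The sketch below restates that decision procedure as plain C++ over a simplified value model; the enum and the undetectable flag are assumptions for illustration only.

#include <string>

enum Kind { kSmi, kHeapNumber, kString, kTrue, kFalse, kUndefined,
            kNull, kCallable, kNonCallableObject };

// What `typeof value == check` evaluates to under the emitted checks.
bool TypeofEquals(Kind kind, bool undetectable, const std::string& check) {
  if (check == "number")    return kind == kSmi || kind == kHeapNumber;
  if (check == "string")    return kind == kString && !undetectable;
  if (check == "boolean")   return kind == kTrue || kind == kFalse;
  if (check == "undefined") return kind == kUndefined ||
                                   (kind != kSmi && undetectable);
  if (check == "function")  return kind == kCallable;
  if (check == "object")    return kind == kNull ||
                                   (kind == kNonCallableObject && !undetectable);
  return false;  // unknown literal: the generated code just jumps to if_false
}
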
@@ -3722,14 +3987,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
@@ -3792,10 +4055,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- NearLabel slow_case;
+ Label slow_case;
__ movq(rcx, rdx);
__ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
@@ -3804,7 +4067,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site);
+ EmitCallIC(ic, &patch_site, expr->id());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@@ -3836,8 +4099,7 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// It can be an undetectable object.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
@@ -3864,69 +4126,51 @@ Register FullCodeGenerator::context_register() {
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
-
- __ call(ic, mode);
-
- // Crankshaft doesn't need patching of inlined loads and stores.
- // When compiling the snapshot we need to produce code that works
- // with and without Crankshaft.
- if (V8::UseCrankshaft() && !Serializer::enabled()) {
- return;
- }
-
- // If we're calling a (keyed) load or store stub, we have to mark
- // the call as containing no inlined code so we will not attempt to
- // patch it.
- switch (ic->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- __ nop(); // Signals no inlined code.
- break;
- default:
- // Do nothing.
- break;
- }
+ __ call(ic, mode, ast_id);
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ JumpPatchSite* patch_site,
+ unsigned ast_id) {
+ Counters* counters = isolate()->counters();
switch (ic->kind()) {
case Code::LOAD_IC:
- __ IncrementCounter(&Counters::named_load_full, 1);
+ __ IncrementCounter(counters->named_load_full(), 1);
break;
case Code::KEYED_LOAD_IC:
- __ IncrementCounter(&Counters::keyed_load_full, 1);
+ __ IncrementCounter(counters->keyed_load_full(), 1);
break;
case Code::STORE_IC:
- __ IncrementCounter(&Counters::named_store_full, 1);
+ __ IncrementCounter(counters->named_store_full(), 1);
break;
case Code::KEYED_STORE_IC:
- __ IncrementCounter(&Counters::keyed_store_full, 1);
+ __ IncrementCounter(counters->keyed_store_full(), 1);
default:
break;
}
-
- __ call(ic, RelocInfo::CODE_TARGET);
+ __ call(ic, RelocInfo::CODE_TARGET, ast_id);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
@@ -3946,6 +4190,25 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ if (scope()->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ Push(Smi::FromInt(0));
+ } else if (scope()->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(scope()->is_function_scope());
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
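
A compact restatement of the closure-selection rule encoded above, using stand-in types that are assumptions for illustration only; Smi 0 is the sentinel the runtime replaces with the canonical empty function.

#include <cstdint>

enum ScopeKind { kGlobalScope, kEvalScope, kFunctionScope };

struct Frame { int64_t function; };    // rbp[JavaScriptFrameConstants::kFunctionOffset]
struct Context { int64_t closure; };   // ContextOperand(rsi, Context::CLOSURE_INDEX)

// Returns the value the generated code pushes as the closure argument.
int64_t FunctionArgumentForContextAllocation(ScopeKind scope,
                                             const Context& current,
                                             const Frame& frame) {
  switch (scope) {
    case kGlobalScope:   return 0;                // Smi 0 sentinel for the runtime
    case kEvalScope:     return current.closure;  // share the caller's closure
    case kFunctionScope: return frame.function;   // the enclosing function itself
  }
  return 0;
}
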
+
+
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index b3243cf48a..8919765cbc 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -76,11 +76,11 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// Check that the receiver is a valid JS object.
__ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
@@ -97,58 +97,6 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
}
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
-
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
- __ decl(r0);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- for (int i = 0; i < kProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
- if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r1, r0);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
-
- // Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- if (i != kProbes - 1) {
- __ j(equal, done);
- } else {
- __ j(not_equal, miss);
- }
- }
-}
-
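
The unrolled probing removed here now lives in StringDictionaryLookupStub::GeneratePositiveLookup, but the lookup scheme is unchanged. A sketch of that scheme, following the formula and constants stated in the removed comments ((hash + i + i*i) & mask, 3-word entries, 4 probes); the flat keys vector is a stand-in for the dictionary's elements array and the capacity is assumed to be a power of two:

#include <cstdint>
#include <string>
#include <vector>

static const int kProbes = 4;     // unrolled probe count (covers ~93% of hits)
static const int kEntrySize = 3;  // each StringDictionary entry spans 3 words

// Returns the entry index of |name|, or -1 after kProbes misses (the generated
// code jumps to the miss label instead of returning).
int ProbeStringDictionary(const std::vector<std::string>& keys,
                          const std::string& name, uint32_t hash) {
  uint32_t mask = static_cast<uint32_t>(keys.size() / kEntrySize) - 1;  // capacity - 1
  for (int i = 0; i < kProbes; i++) {
    // Masked index for probe i, as in the comment above: (hash + i + i * i) & mask.
    uint32_t slot = (hash + static_cast<uint32_t>(i + i * i)) & mask;
    if (keys[slot * kEntrySize] == name) return static_cast<int>(slot);
  }
  return -1;
}
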
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
@@ -179,13 +127,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
// If probing finds an entry in the dictionary, r0 contains the
// index into the dictionary. Check that the value is a normal
@@ -237,13 +185,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
Label done;
// Probe the dictionary.
- GenerateStringDictionaryProbes(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ scratch0,
+ scratch1);
// If probing finds an entry in the dictionary, scratch0 contains the
// index into the dictionary. Check that the value is a normal
@@ -381,11 +329,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
}
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -554,7 +497,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string, property_array_property;
- Label check_pixel_array, probe_dictionary, check_number_dictionary;
+ Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
@@ -565,11 +508,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in rcx.
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kHasFastElements));
- __ j(zero, &check_pixel_array);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(rcx, &check_number_dictionary);
GenerateFastArrayLoad(masm,
rdx,
@@ -579,21 +519,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
rax,
NULL,
&slow);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayLoad(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rax,
- &check_number_dictionary,
- NULL,
- &slow);
-
__ bind(&check_number_dictionary);
+ __ SmiToInteger32(rbx, rax);
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+
// Check whether the elements is a number dictionary.
// rdx: receiver
// rax: key
@@ -609,7 +542,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Slow case: Jump to runtime.
// rdx: receiver
// rax: key
- __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
@@ -638,10 +571,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys();
+ = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
__ movq(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ movq(kScratchRegister, cache_keys);
+ __ LoadAddress(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
@@ -649,8 +582,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get field offset, which is a 32-bit integer.
ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets();
- __ movq(kScratchRegister, cache_field_offsets);
+ = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+ __ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ subq(rdi, rcx);
@@ -660,7 +593,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
__ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Load property array property.
@@ -668,7 +601,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
FixedArray::kHeaderSize));
- __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -683,7 +616,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
- __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_string);
@@ -722,7 +655,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
@@ -758,11 +691,14 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(rcx); // return address
// Perform tail call to the entry.
- __ TailCallExternalReference(ExternalReference(
- IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
__ bind(&slow);
- GenerateMiss(masm);
+ GenerateMiss(masm, false);
}
@@ -774,7 +710,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra, check_pixel_array;
+ Label slow, slow_with_tagged_index, fast, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
@@ -791,9 +727,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(rbx, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// rax: value
@@ -803,7 +743,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_pixel_array);
+ __ j(not_equal, &slow);
__ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
@@ -817,25 +757,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
GenerateRuntimeSetProperty(masm, strict_mode);
// Never returns to here.
- // Check whether the elements is a pixel array.
- // rax: value
- // rdx: receiver
- // rbx: receiver's elements array
- // rcx: index, zero-extended.
- __ bind(&check_pixel_array);
- GenerateFastPixelArrayStore(masm,
- rdx,
- rcx,
- rax,
- rbx,
- rdi,
- false,
- true,
- NULL,
- &slow,
- &slow,
- &slow);
-
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
@@ -875,10 +796,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
- NearLabel non_smi_value;
+ Label non_smi_value;
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
- __ JumpIfNotSmi(rax, &non_smi_value);
+ __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
@@ -893,7 +814,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
- Code::Kind kind) {
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rdx : receiver
@@ -904,10 +826,11 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
- Code::kNoExtraICState,
+ extra_ic_state,
NORMAL,
argc);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -943,7 +866,8 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ no_reg);
__ bind(&miss);
}
@@ -969,7 +893,8 @@ static void GenerateFunctionTailCall(MacroAssembler* masm,
// Invoke the function.
ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -1001,7 +926,10 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1012,10 +940,11 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
+ Counters* counters = masm->isolate()->counters();
if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(&Counters::call_miss, 1);
+ __ IncrementCounter(counters->call_miss(), 1);
} else {
- __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ __ IncrementCounter(counters->keyed_call_miss(), 1);
}
// Get the receiver of the function from the stack; 1 ~ return address.
@@ -1030,8 +959,8 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
// Call the entry.
CEntryStub stub(1);
- __ movq(rax, Immediate(2));
- __ movq(rbx, ExternalReference(IC_Utility(id)));
+ __ Set(rax, 2);
+ __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@@ -1057,12 +986,21 @@ static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
}
// Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+ __ InvokeFunction(rdi,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ call_kind);
}
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1075,8 +1013,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
- GenerateMiss(masm, argc);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+ GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1092,11 +1030,13 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
GenerateCallNormal(masm, argc);
- GenerateMiss(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1107,7 +1047,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
@@ -1141,7 +1081,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateFastArrayLoad(
masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
__ bind(&do_call);
// receiver in rdx is not used after this point.
@@ -1159,13 +1100,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ SmiToInteger32(rbx, rcx);
// ebx: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
__ bind(&slow_load);
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
- __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
__ EnterInternalFrame();
__ push(rcx); // save the key
__ push(rdx); // pass the receiver
@@ -1192,12 +1133,15 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ j(not_equal, &lookup_monomorphic_cache);
GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
__ jmp(&do_call);
__ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
- GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
+ GenerateMonomorphicCacheProbe(masm,
+ argc,
+ Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1207,7 +1151,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
- __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
GenerateMiss(masm, argc);
__ bind(&index_string);
@@ -1250,7 +1194,170 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
+ Condition check = masm->CheckNonNegativeSmi(key);
+ __ j(NegateCondition(check), slow_case);
+
+ // Load the elements into scratch1 and check its map. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments.
+ __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
+ __ cmpq(key, scratch2);
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ SmiToInteger64(scratch3, key);
+ __ movq(scratch2, FieldOperand(scratch1,
+ scratch3,
+ times_pointer_size,
+ kHeaderSize));
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiToInteger64(scratch3, scratch2);
+ return FieldOperand(scratch1,
+ scratch3,
+ times_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpq(key, scratch);
+ __ j(greater_equal, slow_case);
+ __ SmiToInteger64(scratch, key);
+ return FieldOperand(backing_store,
+ scratch,
+ times_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(
+ masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
+ __ movq(rax, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movq(rax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
+ __ movq(mapped_location, rax);
+ __ lea(r9, mapped_location);
+ __ movq(r8, rax);
+ __ RecordWrite(rbx, r9, r8);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
+ __ movq(unmapped_location, rax);
+ __ lea(r9, unmapped_location);
+ __ movq(r8, rax);
+ __ RecordWrite(rbx, r9, r8);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+ Label slow, notin;
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
+ __ movq(rdi, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movq(rdi, unmapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
}
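
For orientation, a plain C++ model (not part of the upstream diff) of the lookup the two helpers above generate, under the simplified layout implied by their comments: each mapped parameter slot holds either a context slot index or the hole, and anything unmapped falls back to a flat backing store. The real parameter map keeps the context and the backing store in its first two elements; they are separate members here.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ArgumentsModel {
      static constexpr int64_t kHole = INT64_MIN;  // stand-in for the-hole value
      std::vector<int64_t> context;        // context slots
      std::vector<int64_t> backing_store;  // unmapped arguments
      std::vector<int64_t> parameter_map;  // per-parameter context index or kHole

      // Mapped entries read through the context; everything else falls back to
      // the backing store; out-of-range keys take the slow path.
      bool Load(size_t key, int64_t* out) const {
        if (key < parameter_map.size() && parameter_map[key] != kHole) {
          *out = context[static_cast<size_t>(parameter_map[key])];
          return true;
        }
        if (key < backing_store.size()) {  // unmapped case
          *out = backing_store[key];
          return true;
        }
        return false;  // slow case
      }
    };

    int main() {
      ArgumentsModel args;
      args.context = {10, 20, 30};
      args.backing_store = {100, 200, 300};
      args.parameter_map = {2, ArgumentsModel::kHole};  // arg 0 aliases context[2]
      int64_t value = 0;
      assert(args.Load(0, &value) && value == 30);   // mapped: reads the context slot
      assert(args.Load(1, &value) && value == 200);  // hole: falls back to the store
      assert(args.Load(2, &value) && value == 300);  // beyond the map: backing store
      return 0;
    }
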
@@ -1265,7 +1372,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
+ rdx);
// Cache miss: Jump to runtime.
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1300,7 +1408,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::load_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->load_miss(), 1);
__ pop(rbx);
__ push(rax); // receiver
@@ -1308,143 +1417,21 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test rax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction.
- int delta = *reinterpret_cast<int*>(delta_address);
-
- // The map address is the last 8 bytes of the 10-byte
- // immediate move instruction, so we add 2 to get the
- // offset to the last 8 bytes.
- Address map_address = test_instruction_address + delta + 2;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // The offset is in the 32-bit displacement of a seven byte
- // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
- // so we add 3 to get the offset of the displacement.
- Address offset_address =
- test_instruction_address + delta + kOffsetToLoadInstruction + 3;
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
- return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- // TODO(<bug#>): implement this.
- return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- if (V8::UseCrankshaft()) return false;
-
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test rax, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Extract the encoded deltas from the test rax instruction.
- Address encoded_offsets_address = test_instruction_address + 1;
- int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
- int delta_to_map_check = -(encoded_offsets & 0xFFFF);
- int delta_to_record_write = encoded_offsets >> 16;
-
- // Patch the map to check. The map address is the last 8 bytes of
- // the 10-byte immediate move instruction.
- Address map_check_address = test_instruction_address + delta_to_map_check;
- Address map_address = map_check_address + 2;
- *(reinterpret_cast<Object**>(map_address)) = map;
-
- // Patch the offset in the store instruction. The offset is in the
- // last 4 bytes of a 7 byte register-to-memory move instruction.
- Address offset_address =
- map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
- // The offset should have initial value (kMaxInt - 1), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- // Patch the offset in the write-barrier code. The offset is the
- // last 4 bytes of a 7 byte lea instruction.
- offset_address = map_check_address + delta_to_record_write + 3;
- // The offset should have initial value (kMaxInt), cleared value
- // (-1) or we should be clearing the inlined version.
- ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
- *reinterpret_cast<int*>(offset_address) == -1 ||
- (offset == 0 && map == Heap::null_value()));
- *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
- return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
- if (V8::UseCrankshaft()) return false;
-
- // Arguments are address of start of call sequence that called
- // the IC,
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
- // The keyed load has a fast inlined case if the IC call instruction
- // is immediately followed by a test instruction.
- if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
- // Fetch the offset from the test instruction to the map compare
- // instructions (starting with the 64-bit immediate mov of the map
- // address). This offset is stored in the last 4 bytes of the 5
- // byte test instruction.
- Address delta_address = test_instruction_address + 1;
- int delta = *reinterpret_cast<int*>(delta_address);
- // Compute the map address. The map address is in the last 8 bytes
- // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
- // to the offset to get the map address.
- Address map_address = test_instruction_address + delta + 2;
- // Patch the map check.
- *(reinterpret_cast<Object**>(map_address)) = map;
- return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ IncrementCounter(&Counters::keyed_load_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_miss(), 1);
__ pop(rbx);
__ push(rdx); // receiver
@@ -1452,7 +1439,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1488,7 +1478,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
NOT_IN_LOOP,
MONOMORPHIC,
strict_mode);
- StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
+ no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1510,16 +1501,12 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -1563,7 +1550,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(value);
__ push(scratch); // return address
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
@@ -1585,11 +1573,12 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
- __ IncrementCounter(&Counters::store_normal_hit, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::store_normal_miss, 1);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
}
@@ -1637,7 +1626,27 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1652,7 +1661,10 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(rbx); // return address
// Do tail-call to runtime routine.
- ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/deps/v8/src/x64/jump-target-x64.cc b/deps/v8/src/x64/jump-target-x64.cc
deleted file mode 100644
index e715604632..0000000000
--- a/deps/v8/src/x64/jump-target-x64.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There is an expected frame to merge to.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else if (entry_frame_ != NULL) {
- // Forward jump with a preconfigured entry frame. Assert the
- // current frame matches the expected one and jump to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- } else {
- // Forward jump. Remember the current frame and emit a jump to
- // its merge code.
- AddReachingFrame(cgen()->frame());
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- __ jmp(&merge_labels_.last());
- }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint b) {
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
-
- if (is_bound()) {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
-
- // Swap the current frame for a copy (we do the swapping to get
- // the off-frame registers off the fall through) to use for the
- // branch.
- VirtualFrame* fall_through_frame = cgen()->frame();
- VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers;
- cgen()->SetFrame(branch_frame, &non_frame_registers);
-
- // Check if we can avoid merge code.
- cgen()->frame()->PrepareMergeTo(entry_frame_);
- if (cgen()->frame()->Equals(entry_frame_)) {
- // Branch right in to the block.
- cgen()->DeleteFrame();
- __ j(cc, &entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
-
- // Check if we can reuse existing merge code.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL &&
- cgen()->frame()->Equals(reaching_frames_[i])) {
- // Branch to the merge code.
- cgen()->DeleteFrame();
- __ j(cc, &merge_labels_[i]);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- return;
- }
- }
-
- // To emit the merge code here, we negate the condition and branch
- // around the merge code on the fall through path.
- Label original_fall_through;
- __ j(NegateCondition(cc), &original_fall_through);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- cgen()->SetFrame(fall_through_frame, &non_frame_registers);
- __ bind(&original_fall_through);
-
- } else if (entry_frame_ != NULL) {
- // Forward branch with a preconfigured entry frame. Assert the
- // current frame matches the expected one and branch to the block.
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- // Explicitly use the macro assembler instead of __ as forward
- // branches are expected to be a fixed size (no inserted
- // coverage-checking instructions please). This is used in
- // Reference::GetValue.
- cgen()->masm()->j(cc, &entry_label_);
-
- } else {
- // Forward branch. A copy of the current frame is remembered and
- // a branch to the merge code is emitted. Explicitly use the
- // macro assembler instead of __ as forward branches are expected
- // to be a fixed size (no inserted coverage-checking instructions
- // please). This is used in Reference::GetValue.
- AddReachingFrame(new VirtualFrame(cgen()->frame()));
- cgen()->masm()->j(cc, &merge_labels_.last());
- }
-}
-
-
-void JumpTarget::Call() {
- // Call is used to push the address of the catch block on the stack as
- // a return address when compiling try/catch and try/finally. We
- // fully spill the frame before making the call. The expected frame
- // at the label (which should be the only one) is the spilled current
- // frame plus an in-memory return address. The "fall-through" frame
- // at the return site is the spilled current frame.
- ASSERT(cgen() != NULL);
- ASSERT(cgen()->has_valid_frame());
- // There are no non-frame references across the call.
- ASSERT(cgen()->HasValidEntryRegisters());
- ASSERT(!is_linked());
-
- cgen()->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
- target_frame->Adjust(1);
- // We do not expect a call with a preconfigured entry frame.
- ASSERT(entry_frame_ == NULL);
- AddReachingFrame(target_frame);
- __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(cgen() != NULL);
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- // Fast case: the jump target was manually configured with an entry
- // frame to use.
- if (entry_frame_ != NULL) {
- // Assert no reaching frames to deal with.
- ASSERT(reaching_frames_.is_empty());
- ASSERT(!cgen()->has_valid_frame());
-
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- // Copy the entry frame so the original can be used for a
- // possible backward jump.
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- } else {
- // Take ownership of the entry frame.
- cgen()->SetFrame(entry_frame_, &empty);
- entry_frame_ = NULL;
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (!is_linked()) {
- ASSERT(cgen()->has_valid_frame());
- if (direction_ == FORWARD_ONLY) {
- // Fast case: no forward jumps and no possible backward jumps.
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- VirtualFrame* frame = cgen()->frame();
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
- } else {
- ASSERT(direction_ == BIDIRECTIONAL);
- // Fast case: no forward jumps, possible backward ones. Remove
- // constants and copies above the watermark on the fall-through
- // frame and use it as the entry frame.
- cgen()->frame()->MakeMergable();
- entry_frame_ = new VirtualFrame(cgen()->frame());
- }
- __ bind(&entry_label_);
- return;
- }
-
- if (direction_ == FORWARD_ONLY &&
- !cgen()->has_valid_frame() &&
- reaching_frames_.length() == 1) {
- // Fast case: no fall-through, a single forward jump, and no
- // possible backward jumps. Pick up the only reaching frame, take
- // ownership of it, and use it for the block about to be emitted.
- VirtualFrame* frame = reaching_frames_[0];
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[0] = NULL;
- __ bind(&merge_labels_[0]);
-
- // The stack pointer can be floating above the top of the
- // virtual frame before the bind. Afterward, it should not.
- int difference = frame->stack_pointer_ - (frame->element_count() - 1);
- if (difference > 0) {
- frame->stack_pointer_ -= difference;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
-
- __ bind(&entry_label_);
- return;
- }
-
- // If there is a current frame, record it as the fall-through. It
- // is owned by the reaching frames for now.
- bool had_fall_through = false;
- if (cgen()->has_valid_frame()) {
- had_fall_through = true;
- AddReachingFrame(cgen()->frame()); // Return value ignored.
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- }
-
- // Compute the frame to use for entry to the block.
- ComputeEntryFrame();
-
- // Some moves required to merge to an expected frame require purely
- // frame state changes, and do not require any code generation.
- // Perform those first to increase the possibility of finding equal
- // frames below.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (reaching_frames_[i] != NULL) {
- reaching_frames_[i]->PrepareMergeTo(entry_frame_);
- }
- }
-
- if (is_linked()) {
- // There were forward jumps. Handle merging the reaching frames
- // to the entry frame.
-
- // Loop over the (non-null) reaching frames and process any that
- // need merge code. Iterate backwards through the list to handle
- // the fall-through frame first. Set frames that will be
- // processed after 'i' to NULL if we want to avoid processing
- // them.
- for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
- VirtualFrame* frame = reaching_frames_[i];
-
- if (frame != NULL) {
- // Does the frame (probably) need merge code?
- if (!frame->Equals(entry_frame_)) {
- // We could have a valid frame as the fall through to the
- // binding site or as the fall through from a previous merge
- // code block. Jump around the code we are about to
- // generate.
- if (cgen()->has_valid_frame()) {
- cgen()->DeleteFrame();
- __ jmp(&entry_label_);
- }
- // Pick up the frame for this block. Assume ownership if
- // there cannot be backward jumps.
- RegisterFile empty;
- if (direction_ == BIDIRECTIONAL) {
- cgen()->SetFrame(new VirtualFrame(frame), &empty);
- } else {
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- __ bind(&merge_labels_[i]);
-
- // Loop over the remaining (non-null) reaching frames,
- // looking for any that can share merge code with this one.
- for (int j = 0; j < i; j++) {
- VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen()->frame())) {
- // Set the reaching frame element to null to avoid
- // processing it later, and then bind its entry label.
- reaching_frames_[j] = NULL;
- __ bind(&merge_labels_[j]);
- }
- }
-
- // Emit the merge code.
- cgen()->frame()->MergeTo(entry_frame_);
- } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
- // If this is the fall through frame, and it didn't need
- // merge code, we need to pick up the frame so we can jump
- // around subsequent merge blocks if necessary.
- RegisterFile empty;
- cgen()->SetFrame(frame, &empty);
- reaching_frames_[i] = NULL;
- }
- }
- }
-
- // The code generator may not have a current frame if there was no
- // fall through and none of the reaching frames needed merging.
- // In that case, clone the entry frame as the current frame.
- if (!cgen()->has_valid_frame()) {
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // There may be unprocessed reaching frames that did not need
- // merge code. They will have unbound merge labels. Bind their
- // merge labels to be the same as the entry label and deallocate
- // them.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- if (!merge_labels_[i].is_bound()) {
- reaching_frames_[i] = NULL;
- __ bind(&merge_labels_[i]);
- }
- }
-
- // There are non-NULL reaching frames with bound labels for each
- // merge block, but only on backward targets.
- } else {
- // There were no forward jumps. There must be a current frame and
- // this must be a bidirectional target.
- ASSERT(reaching_frames_.length() == 1);
- ASSERT(reaching_frames_[0] != NULL);
- ASSERT(direction_ == BIDIRECTIONAL);
-
- // Use a copy of the reaching frame so the original can be saved
- // for possible reuse as a backward merge block.
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
- __ bind(&merge_labels_[0]);
- cgen()->frame()->MergeTo(entry_frame_);
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- // Drop leftover statement state from the frame before merging, without
- // emitting code.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- }
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_ + 1);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even on
- // the fall through. This is so we can bind the return target with state
- // on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->ForgetElements(count);
- cgen()->frame()->Push(arg);
- }
- DoBind();
- *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 48844a5bfe..92c8891267 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -39,7 +39,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -49,14 +49,26 @@ class SafepointGenerator : public PostCallGenerator {
deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { }
- virtual void Generate() {
- // Ensure that we have enough space in the reloc info to patch
- // this with calls when doing deoptimization.
- codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ virtual void BeforeCall(int call_size) const {
+ ASSERT(call_size >= 0);
+ // Ensure that we have enough space after the previous safepoint position
+ // for the jump generated there.
+ int call_end = codegen_->masm()->pc_offset() + call_size;
+ int prev_jump_end = codegen_->LastSafepointEnd() + kMinSafepointSize;
+ if (call_end < prev_jump_end) {
+ int padding_size = prev_jump_end - call_end;
+ STATIC_ASSERT(kMinSafepointSize <= 9); // One multibyte nop is enough.
+ codegen_->masm()->nop(padding_size);
+ }
+ }
+
+ virtual void AfterCall() const {
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
private:
+ static const int kMinSafepointSize =
+ MacroAssembler::kShortCallInstructionLength;
LCodeGen* codegen_;
LPointerMap* pointers_;
int deoptimization_index_;
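
The BeforeCall/AfterCall pair above exists so that the deoptimizer always has room to patch a jump after the previous safepoint. A minimal sketch of just that padding rule, with hypothetical names; the assumption is that kMinSafepointSize bytes after each safepoint must stay clear of the next call's end.

    #include <cassert>

    // Hypothetical free-standing version of the padding rule in BeforeCall().
    int PaddingBeforeCall(int pc_offset, int call_size,
                          int last_safepoint_end, int min_safepoint_size) {
      int call_end = pc_offset + call_size;                 // where the call ends
      int prev_jump_end = last_safepoint_end + min_safepoint_size;
      return call_end < prev_jump_end ? prev_jump_end - call_end : 0;  // nop bytes
    }

    int main() {
      // The call would end 3 bytes inside the reserved window: emit 3 nop bytes.
      assert(PaddingBeforeCall(10, 5, 12, 6) == 3);
      // The call already clears the window: no padding needed.
      assert(PaddingBeforeCall(20, 5, 12, 6) == 0);
      return 0;
    }
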
@@ -79,7 +91,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(StackSlotCount());
+ code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -88,8 +100,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
void LCodeGen::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -128,16 +140,31 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
+ // Strict mode functions need to replace the receiver with undefined
+ // when called as functions (without an explicit receiver
+ // object). rcx is zero for method calls and non-zero for function
+ // calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
+
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
__ push(rdi); // Callee's JS function.
// Reserve space for the stack slots needed by the code.
- int slots = StackSlotCount();
+ int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ movl(rax, Immediate(slots));
+ __ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
Label loop;
__ bind(&loop);
@@ -170,7 +197,7 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both rax and rsi. It replaces the context
@@ -239,9 +266,8 @@ LInstruction* LCodeGen::GetNextInstruction() {
bool LCodeGen::GenerateJumpTable() {
for (int i = 0; i < jump_table_.length(); i++) {
- JumpTableEntry* info = jump_table_[i];
- __ bind(&(info->label_));
- __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&jump_table_[i].label);
+ __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
}
return !is_aborted();
}
@@ -279,7 +305,7 @@ bool LCodeGen::GenerateSafepointTable() {
while (byte_count-- > 0) {
__ int3();
}
- safepoints_.Emit(masm(), StackSlotCount());
+ safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -407,7 +433,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = StackSlotCount() + op->index();
+ int src_index = GetStackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -442,7 +468,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
- if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+ if (code->kind() == Code::BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
@@ -456,7 +482,7 @@ void LCodeGen::CallCode(Handle<Code> code,
}
-void LCodeGen::CallRuntime(Runtime::Function* function,
+void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
ASSERT(instr != NULL);
@@ -549,17 +575,13 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
if (cc == no_condition) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- JumpTableEntry* jump_info = NULL;
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (jump_table_.length() > 0 &&
- jump_table_[jump_table_.length() - 1]->address_ == entry) {
- jump_info = jump_table_[jump_table_.length() - 1];
- } else {
- jump_info = new JumpTableEntry(entry);
- jump_table_.Add(jump_info);
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry) {
+ jump_table_.Add(JumpTableEntry(entry));
}
- __ j(cc, &jump_info->label_);
+ __ j(cc, &jump_table_.last().label);
}
}
@@ -569,14 +591,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
if (length == 0) return;
ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
- Factory::NewDeoptimizationInputData(length, TENURED);
+ factory()->NewDeoptimizationInputData(length, TENURED);
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
- Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
@@ -670,7 +692,7 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -683,7 +705,7 @@ void LCodeGen::DoLabel(LLabel* label) {
}
__ bind(label->label());
current_block_ = label->block_id();
- LCodeGen::DoGap(label);
+ DoGap(label);
}
@@ -709,6 +731,11 @@ void LCodeGen::DoGap(LGap* gap) {
}
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
void LCodeGen::DoParameter(LParameter* instr) {
// Nothing to do.
}
@@ -732,16 +759,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCharAt: {
- StringCharAtStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::MathPow: {
- MathPowStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -775,41 +792,114 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- LOperand* right = instr->InputAt(1);
- ASSERT(ToRegister(instr->result()).is(rdx));
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->InputAt(0));
- Register right_reg = ToRegister(right);
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+
+ if (divisor < 0) divisor = -divisor;
+
+ Label positive_dividend, done;
+ __ testl(dividend, dividend);
+ __ j(not_sign, &positive_dividend, Label::kNear);
+ __ negl(dividend);
+ __ andl(dividend, Immediate(divisor - 1));
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ j(not_zero, &done, Label::kNear);
+ DeoptimizeIf(no_condition, instr->environment());
+ } else {
+ __ jmp(&done, Label::kNear);
+ }
+ __ bind(&positive_dividend);
+ __ andl(dividend, Immediate(divisor - 1));
+ __ bind(&done);
+ } else {
+ Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Register left_reg = ToRegister(instr->InputAt(0));
+ Register right_reg = ToRegister(instr->InputAt(1));
+ Register result_reg = ToRegister(instr->result());
+
+ ASSERT(left_reg.is(rax));
+ ASSERT(result_reg.is(rdx));
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ testl(left_reg, left_reg);
+ __ j(zero, &remainder_eq_dividend, Label::kNear);
+ __ j(sign, &slow, Label::kNear);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
+ __ j(not_sign, &both_positive, Label::kNear);
+ // The sign of the divisor doesn't matter.
+ __ neg(right_reg);
- // Sign extend eax to edx. (We are using only the low 32 bits of the values.)
- __ cdq();
+ __ bind(&both_positive);
+ // If the dividend is smaller than the nonnegative
+ // divisor, the dividend is the result.
+ __ cmpl(left_reg, right_reg);
+ __ j(less, &remainder_eq_dividend, Label::kNear);
+
+ // Check if the divisor is a PowerOfTwo integer.
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ movl(scratch, right_reg);
+ __ subl(scratch, Immediate(1));
+ __ testl(scratch, right_reg);
+ __ j(not_zero, &do_subtraction, Label::kNear);
+ __ andl(left_reg, scratch);
+ __ jmp(&remainder_eq_dividend, Label::kNear);
+
+ __ bind(&do_subtraction);
+ const int kUnfolds = 3;
+ // Try a few subtractions of the dividend.
+ __ movl(scratch, left_reg);
+ for (int i = 0; i < kUnfolds; i++) {
+ // Reduce the dividend by the divisor.
+ __ subl(left_reg, right_reg);
+ // Check if the dividend is less than the divisor.
+ __ cmpl(left_reg, right_reg);
+ __ j(less, &remainder_eq_dividend, Label::kNear);
+ }
+ __ movl(left_reg, scratch);
+
+ // Slow case, using idiv instruction.
+ __ bind(&slow);
+ // Sign extend eax to edx.
+ // (We are using only the low 32 bits of the values.)
+ __ cdq();
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ Label done;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
+ __ idivl(right_reg);
+
+ // Test the remainder for 0, because then the result would be -0.
+ __ testl(result_reg, result_reg);
+ __ j(not_zero, &done, Label::kNear);
+
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&positive_left);
+ __ idivl(right_reg);
+ __ bind(&done);
+ } else {
+ __ idivl(right_reg);
+ }
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&remainder_eq_dividend);
+ __ movl(result_reg, left_reg);
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel positive_left;
- NearLabel done;
- __ testl(rax, rax);
- __ j(not_sign, &positive_left);
- __ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ testl(rdx, rdx);
- __ j(not_zero, &done);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idivl(right_reg);
__ bind(&done);
- } else {
- __ idivl(right_reg);
}
}
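
The HasPowerOf2Divisor() path above computes a truncating modulus with a mask, negating a negative dividend before and after so the result keeps the dividend's sign. A minimal C++ check of that arithmetic (the deopt-on-minus-zero branch in the generated code has no counterpart here, since C++ integers have no negative zero):

    #include <cassert>
    #include <cstdint>

    // Truncating modulus by a power of two, the way the masked path computes it.
    int32_t ModPowerOfTwo(int32_t dividend, int32_t divisor) {
      if (divisor < 0) divisor = -divisor;  // the sign of the divisor is irrelevant
      int32_t mask = divisor - 1;           // divisor is assumed to be 2^k
      if (dividend < 0) {
        return -(-dividend & mask);         // negl, andl, negl in the generated code
      }
      return dividend & mask;               // plain andl for nonnegative dividends
    }

    int main() {
      for (int32_t d : {1, 2, 4, 8, 16, 32}) {
        for (int32_t x = -40; x <= 40; ++x) {
          assert(ModPowerOfTwo(x, d) == x % d);
          assert(ModPowerOfTwo(x, -d) == x % -d);
        }
      }
      return 0;
    }
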
@@ -832,9 +922,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel left_not_zero;
+ Label left_not_zero;
__ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero);
+ __ j(not_zero, &left_not_zero, Label::kNear);
__ testl(right_reg, right_reg);
DeoptimizeIf(sign, instr->environment());
__ bind(&left_not_zero);
@@ -842,9 +932,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (-kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- NearLabel left_not_min_int;
+ Label left_not_min_int;
__ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int);
+ __ j(not_zero, &left_not_min_int, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
__ bind(&left_not_min_int);
@@ -868,24 +958,64 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ movl(kScratchRegister, left);
}
+ bool can_overflow =
+ instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (right->IsConstantOperand()) {
int right_value = ToInteger32(LConstantOperand::cast(right));
- __ imull(left, left, Immediate(right_value));
+ if (right_value == -1) {
+ __ negl(left);
+ } else if (right_value == 0) {
+ __ xorl(left, left);
+ } else if (right_value == 2) {
+ __ addl(left, left);
+ } else if (!can_overflow) {
+ // If the multiplication is known to not overflow, we
+ // can use operations that don't set the overflow flag
+ // correctly.
+ switch (right_value) {
+ case 1:
+ // Do nothing.
+ break;
+ case 3:
+ __ leal(left, Operand(left, left, times_2, 0));
+ break;
+ case 4:
+ __ shll(left, Immediate(2));
+ break;
+ case 5:
+ __ leal(left, Operand(left, left, times_4, 0));
+ break;
+ case 8:
+ __ shll(left, Immediate(3));
+ break;
+ case 9:
+ __ leal(left, Operand(left, left, times_8, 0));
+ break;
+ case 16:
+ __ shll(left, Immediate(4));
+ break;
+ default:
+ __ imull(left, left, Immediate(right_value));
+ break;
+ }
+ } else {
+ __ imull(left, left, Immediate(right_value));
+ }
} else if (right->IsStackSlot()) {
__ imull(left, ToOperand(right));
} else {
__ imull(left, ToRegister(right));
}
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (can_overflow) {
DeoptimizeIf(overflow, instr->environment());
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
- NearLabel done;
+ Label done;
__ testl(left, left);
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
DeoptimizeIf(no_condition, instr->environment());
@@ -1038,7 +1168,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
- __ movl(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Set(ToRegister(instr->result()), instr->value());
}
@@ -1050,7 +1180,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
if (int_val == 0) {
- __ xorpd(res, res);
+ __ xorps(res, res);
} else {
Register tmp = ToRegister(instr->TempAt(0));
__ Set(tmp, int_val);
@@ -1079,10 +1209,24 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
}
-void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(array, PixelArray::kLengthOffset));
+ __ movl(result, FieldOperand(array, ExternalPixelArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte.
+ __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(result, Immediate(Map::kElementsKindMask));
+ __ shr(result, Immediate(Map::kElementsKindShift));
}
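// ---------------------------------------------------------------------------
// Annotation (illustrative sketch, not part of this patch): the elements kind
// is a small bit field packed into the map's "bit field 2" byte, so reading it
// is a mask followed by a shift. With assumed example values
// kElementsKindMask == 0xF8 and kElementsKindShift == 3 (illustration only):
//
//   uint8_t bit_field2 = 0x4A;                       // assumed raw byte
//   int elements_kind = (bit_field2 & 0xF8) >> 3;    // == 9
// ---------------------------------------------------------------------------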
@@ -1090,13 +1234,13 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
- NearLabel done;
+ Label done;
// If the object is a smi return the object.
- __ JumpIfSmi(input, &done);
+ __ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
__ movq(result, FieldOperand(input, JSValue::kValueOffset));
__ bind(&done);
@@ -1142,25 +1286,32 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ XMMRegister left = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister result = ToDoubleRegister(instr->result());
// All operations except MOD are computed in-place.
- ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+ ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
- __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ addsd(left, right);
break;
case Token::SUB:
- __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ subsd(left, right);
break;
case Token::MUL:
- __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ mulsd(left, right);
break;
case Token::DIV:
- __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ divsd(left, right);
break;
case Token::MOD:
- Abort("Unimplemented: %s", "DoArithmeticD MOD");
+ __ PrepareCallCFunction(2);
+ __ movaps(xmm0, left);
+ ASSERT(right.is(xmm1));
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movaps(result, xmm0);
break;
default:
UNREACHABLE();
@@ -1174,7 +1325,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -1219,7 +1370,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm0, xmm0);
+ __ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
@@ -1227,7 +1378,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
HType type = instr->hydrogen()->type();
if (type.IsBoolean()) {
- __ Cmp(reg, Factory::true_value());
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, equal);
} else if (type.IsSmi()) {
__ SmiCompare(reg, Smi::FromInt(0));
@@ -1242,19 +1393,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, true_label);
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, false_label);
- __ SmiCompare(reg, Smi::FromInt(0));
+ __ Cmp(reg, Smi::FromInt(0));
__ j(equal, false_label);
__ JumpIfSmi(reg, true_label);
// Test for double values. Plus/minus zero and NaN are false.
- NearLabel call_stub;
+ Label call_stub;
__ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_stub);
+ __ j(not_equal, &call_stub, Label::kNear);
// HeapNumber => false iff +0, -0, or NaN. These three cases set the
// zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
+ __ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, false_label);
__ jmp(true_label);
@@ -1262,7 +1413,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(rax);
__ Pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1274,44 +1425,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1364,20 +1488,20 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
- NearLabel unordered;
+ Label unordered;
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered);
+ __ j(parity_even, &unordered, Label::kNear);
} else {
EmitCmpI(left, right);
}
- NearLabel done;
+ Label done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
__ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ j(cc, &done);
+ __ j(cc, &done, Label::kNear);
__ bind(&unordered);
__ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
@@ -1405,23 +1529,23 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
- NearLabel different, done;
+ Label different, done;
__ cmpq(left, right);
- __ j(not_equal, &different);
+ __ j(not_equal, &different, Label::kNear);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&different);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1432,6 +1556,29 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpConstantEq(LCmpConstantEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+ __ cmpq(left, Immediate(instr->hydrogen()->right()));
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ j(equal, &done, Label::kNear);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ cmpq(left, Immediate(instr->hydrogen()->right()));
+ EmitBranch(true_block, false_block, equal);
+}
+
+
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1446,28 +1593,29 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
__ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
+ ASSERT(Heap::kTrueValueRootIndex >= 0);
__ movl(result, Immediate(Heap::kTrueValueRootIndex));
- NearLabel load;
- __ j(equal, &load);
- __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+ Label load;
+ __ j(equal, &load, Label::kNear);
+ __ Set(result, Heap::kFalseValueRootIndex);
__ bind(&load);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ __ LoadRootIndexed(result, result, 0);
} else {
- NearLabel true_value, false_value, done;
- __ j(equal, &true_value);
+ Label false_value, true_value, done;
+ __ j(equal, &true_value, Label::kNear);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, &true_value);
- __ JumpIfSmi(reg, &false_value);
+ __ j(equal, &true_value, Label::kNear);
+ __ JumpIfSmi(reg, &false_value, Label::kNear);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value);
+ __ j(not_zero, &true_value, Label::kNear);
__ bind(&false_value);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
@@ -1491,14 +1639,14 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
- __ Cmp(reg, Factory::null_value());
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
- __ Cmp(reg, Factory::undefined_value());
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@@ -1530,9 +1678,9 @@ Condition LCodeGen::EmitIsObject(Register input,
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return below_equal;
}
@@ -1582,8 +1730,7 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size,
- Heap::kTrueValueRootIndex * kPointerSize));
+ __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
}
@@ -1603,6 +1750,40 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ __ JumpIfSmi(input, &false_label);
+ __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &false_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, not_zero);
+}
+
+
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1629,12 +1810,13 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ testl(input, Immediate(kSmiTagMask));
- NearLabel done, is_false;
+ Label done, is_false;
__ j(zero, &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())),
+ &is_false, Label::kNear);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
@@ -1656,6 +1838,20 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
}
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ movl(result, FieldOperand(input, String::kHashFieldOffset));
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ IndexFromHash(result, result);
+}
+
+
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1664,8 +1860,8 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- NearLabel done;
- __ j(not_zero, &done);
+ Label done;
+ __ j(zero, &done, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1680,7 +1876,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(true_block, false_block, equal);
}
@@ -1692,26 +1888,27 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register input,
Register temp) {
__ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- __ j(equal, is_false);
+ __ j(above_equal, is_false);
}
// Check if the constructor in the map is a function.
__ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -1744,7 +1941,7 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
- NearLabel done;
+ Label done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
@@ -1753,7 +1950,7 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
@@ -1793,30 +1990,17 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
__ push(ToRegister(instr->InputAt(0)));
__ push(ToRegister(instr->InputAt(1)));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- NearLabel true_value, done;
+ Label true_value, done;
__ testq(rax, rax);
- __ j(zero, &true_value);
+ __ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, zero);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -1824,26 +2008,53 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredLInstanceOfKnownGlobal(instr_);
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
+ Label* map_check() { return &map_check_; }
+
private:
LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
- Label false_result;
+ Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result);
- // Null is not an instance of anything.
+  // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ // Use a temp register to avoid memory operands with variable lengths.
+ Register map = ToRegister(instr->TempAt(0));
+ __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ __ movq(kScratchRegister, factory()->the_hole_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(map, kScratchRegister); // Patched to cached map.
+ __ j(not_equal, &cache_miss, Label::kNear);
+ // Patched to load either true or false.
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
+#ifdef DEBUG
+ // Check that the code size between patch label and patch sites is invariant.
+ Label end_of_patched_code;
+ __ bind(&end_of_patched_code);
+ ASSERT(true);
+#endif
+ __ jmp(&done);
+
+ // The inlined call site cache did not match. Check for null and string
+ // before calling the deferred code.
+ __ bind(&cache_miss); // Null is not an instance of anything.
__ CompareRoot(object, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
+ __ j(equal, &false_result, Label::kNear);
// String values are not instances of anything.
__ JumpIfNotString(object, kScratchRegister, deferred->entry());
@@ -1852,23 +2063,40 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ bind(deferred->exit());
+ __ bind(&done);
}
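// ---------------------------------------------------------------------------
// Annotation (illustrative sketch, not part of this patch): the two hole-value
// literals emitted above behave as a one-entry inline cache. The instanceof
// stub is expected to patch them with the last map it checked and the matching
// true/false answer, so on a repeat hit the generated code reduces to:
//
//   if (object->map() == cached_map) {     // cmpq against the patched literal
//     result = cached_answer;              // the patched LoadRoot
//   } else {
//     result = SlowPathInstanceOf(object, function);   // deferred code below
//   }
// ---------------------------------------------------------------------------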
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
{
PushSafepointRegistersScope scope(this);
-
- InstanceofStub stub(InstanceofStub::kNoFlags);
+ InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+ InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
+ InstanceofStub stub(flags);
__ push(ToRegister(instr->InputAt(0)));
__ Push(instr->function());
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ Register temp = ToRegister(instr->TempAt(0));
+ static const int kAdditionalDelta = 10;
+ int delta =
+ masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+ ASSERT(delta >= 0);
+ __ push_imm32(delta);
+
+    // We are pushing three values on the stack but recording a
+    // safepoint with two arguments because the stub is going to
+    // remove the third argument from the stack before jumping
+    // to the instanceof builtin on the slow path.
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
2);
+ ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+ // Move result to a register that survives the end of the
+ // PushSafepointRegisterScope.
__ movq(kScratchRegister, rax);
}
__ testq(kScratchRegister, kScratchRegister);
@@ -1893,36 +2121,17 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
- NearLabel true_value, done;
+ Label true_value, done;
__ testq(rax, rax);
- __ j(condition, &true_value);
+ __ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime
@@ -1932,11 +2141,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ movq(rsp, rbp);
__ pop(rbp);
- __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
+ __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
if (result.is(rax)) {
__ load_rax(instr->hydrogen()->cell().location(),
@@ -1952,7 +2161,19 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
}
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ __ Move(rcx, instr->name());
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+ RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
ASSERT(!value.is(temp));
@@ -1975,6 +2196,18 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->global_object()).is(rdx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ __ Move(rcx, instr->name());
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -1988,7 +2221,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ movq(ContextOperand(context, instr->slot_index()), value);
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, offset, value, kScratchRegister);
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ RecordWrite(context, offset, value, scratch);
}
}
@@ -2005,12 +2239,82 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name) {
+ LookupResult lookup;
+ type->LookupInDescriptors(NULL, *name, &lookup);
+ ASSERT(lookup.IsProperty() &&
+ (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+ if (lookup.type() == FIELD) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ movq(result, FieldOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else {
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+ LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ }
+}
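// ---------------------------------------------------------------------------
// Annotation (worked example, not part of this patch): property indices use a
// sign convention here. With assumed values kPointerSize == 8 and an instance
// size of 64 bytes (illustration only):
//
//   int index = -2;                 // in-object property
//   int offset = index * 8;         // -16
//   // loaded from object + 64 + (-16), i.e. byte 48 inside the object
//
//   int index2 = 3;                 // out-of-object property
//   // loaded from the properties FixedArray at kHeaderSize + 3 * 8
// ---------------------------------------------------------------------------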
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+
+ int map_count = instr->hydrogen()->types()->length();
+ Handle<String> name = instr->hydrogen()->name();
+
+ if (map_count == 0) {
+ ASSERT(instr->hydrogen()->need_generic());
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ Label done;
+ for (int i = 0; i < map_count - 1; ++i) {
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label next;
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ __ j(not_equal, &next, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&next);
+ }
+ Handle<Map> map = instr->hydrogen()->types()->last();
+ __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+ if (instr->hydrogen()->need_generic()) {
+ Label generic;
+ __ j(not_equal, &generic, Label::kNear);
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ __ jmp(&done, Label::kNear);
+ __ bind(&generic);
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ EmitLoadFieldOrConstantFunction(result, object, map, name);
+ }
+ __ bind(&done);
+ }
+}
+
+
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rcx, instr->name());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2024,10 +2328,10 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
DeoptimizeIf(not_equal, instr->environment());
// Check whether the function has an instance prototype.
- NearLabel non_instance;
+ Label non_instance;
__ testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &non_instance);
+ __ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
__ movq(result,
@@ -2038,13 +2342,13 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
DeoptimizeIf(equal, instr->environment());
// If the function does not have an initial map, we're done.
- NearLabel done;
+ Label done;
__ CmpObjectType(result, MAP_TYPE, kScratchRegister);
- __ j(not_equal, &done);
+ __ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
__ movq(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
@@ -2061,26 +2365,40 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register input = ToRegister(instr->InputAt(0));
__ movq(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- NearLabel done;
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ j(equal, &done);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Factory::pixel_array_map());
- __ j(equal, &done);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Factory::fixed_cow_array_map());
- __ Check(equal, "Check for fast elements failed.");
+ Label done, ok, fail;
+ __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(equal, &done, Label::kNear);
+ __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &done, Label::kNear);
+ Register temp((result.is(rax)) ? rbx : rax);
+ __ push(temp);
+ __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
+ __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
+ __ and_(temp, Immediate(Map::kElementsKindMask));
+ __ shr(temp, Immediate(Map::kElementsKindShift));
+ __ cmpl(temp, Immediate(JSObject::FAST_ELEMENTS));
+ __ j(equal, &ok, Label::kNear);
+ __ cmpl(temp, Immediate(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ j(less, &fail, Label::kNear);
+ __ cmpl(temp, Immediate(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ j(less_equal, &ok, Label::kNear);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed");
+ __ bind(&ok);
+ __ pop(temp);
__ bind(&done);
}
}
-void LCodeGen::DoLoadPixelArrayExternalPointer(
- LLoadPixelArrayExternalPointer* instr) {
+void LCodeGen::DoLoadExternalArrayPointer(
+ LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
- __ movq(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
+ __ movq(result, FieldOperand(input,
+ ExternalPixelArray::kExternalPointerOffset));
}
@@ -2115,19 +2433,80 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
FixedArray::kHeaderSize));
// Check for the hole value.
- __ Cmp(result, Factory::the_hole_value());
- DeoptimizeIf(equal, instr->environment());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
}
-void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
- Register external_elements = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register result = ToRegister(instr->result());
- ASSERT(result.is(external_elements));
+Operand LCodeGen::BuildExternalArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind) {
+ Register external_pointer_reg = ToRegister(external_pointer);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
+ if (key->IsConstantOperand()) {
+ int constant_value = ToInteger32(LConstantOperand::cast(key));
+ if (constant_value & 0xF0000000) {
+ Abort("array index constant value too big");
+ }
+ return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+ } else {
+ ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+ return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+ }
+}
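// ---------------------------------------------------------------------------
// Annotation (illustrative sketch, not part of this patch): shift_size is log2
// of the element width, so an element's byte offset is key << shift_size. For
// a constant key the scaling is folded into the displacement; for a register
// key it becomes the SIB scale factor. Example for an assumed 32-bit element
// kind (shift_size == 2) and key == 10:
//
//   int32_t byte_offset = 10 * (1 << 2);   // 40 == key * sizeof(int32_t)
//
// The 0xF0000000 check presumably rejects constant keys large enough that the
// scaled displacement could no longer be encoded as a 32-bit offset.
// ---------------------------------------------------------------------------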
- // Load the result.
- __ movzxbq(result, Operand(external_elements, key, times_1, 0));
+
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ movsxbq(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ __ movzxbq(result, operand);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ movsxwq(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzxwq(result, operand);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ __ movsxlq(result, operand);
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(result, operand);
+ __ testl(result, result);
+        // TODO(danno): we could be more clever here, perhaps having a special
+        // version of the stub that detects if the overflow case actually
+        // happens, and generates code that returns a double rather than an int.
+ DeoptimizeIf(negative, instr->environment());
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
}
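// ---------------------------------------------------------------------------
// Annotation (illustrative sketch, not part of this patch): the deopt in the
// EXTERNAL_UNSIGNED_INT case exists because the loaded value is kept in a
// register that the rest of the pipeline treats as a signed int32. On a
// two's-complement target:
//
//   uint32_t stored = 0x80000000u;                    // 2147483648 in the array
//   int32_t  seen   = static_cast<int32_t>(stored);   // reads as -2147483648
//
// Any element with the top bit set would be misinterpreted, so the code bails
// out instead (see the TODO above about returning a double).
// ---------------------------------------------------------------------------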
@@ -2135,7 +2514,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2144,15 +2523,15 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
// Check for arguments adapter frame.
- NearLabel done, adapted;
+ Label done, adapted;
__ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
+ __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
__ movq(result, rbp);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
@@ -2167,7 +2546,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Register result = ToRegister(instr->result());
- NearLabel done;
+ Label done;
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->InputAt(0)->IsRegister()) {
@@ -2175,14 +2554,14 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
} else {
__ cmpq(rbp, ToOperand(instr->InputAt(0)));
}
- __ movq(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done);
+ __ movl(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
__ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(result, result);
+ __ SmiToInteger32(result,
+ Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
// Argument length is in result register.
__ bind(&done);
@@ -2198,27 +2577,46 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ASSERT(function.is(rdi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(rax));
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
- NearLabel global_object, receiver_ok;
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
+ Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ movq(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Do not transform the receiver to object for builtins.
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
__ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object);
+ __ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok);
+ __ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
- __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
@@ -2232,10 +2630,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Loop through the arguments pushing them onto the execution
// stack.
- NearLabel invoke, loop;
+ Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ testl(length, length);
- __ j(zero, &invoke);
+ __ j(zero, &invoke, Label::kNear);
__ bind(&loop);
__ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
__ decl(length);
@@ -2252,26 +2650,27 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
pointers,
env->deoptimization_index());
v8::internal::ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+ __ InvokeFunction(function, actual, CALL_FUNCTION,
+ safepoint_generator, CALL_AS_METHOD);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
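// ---------------------------------------------------------------------------
// Annotation (conceptual sketch, not part of this patch): the receiver fix-up
// emitted above implements, roughly:
//
//   if (function_is_strict_mode || function_is_native) {
//     // leave the receiver exactly as passed
//   } else if (receiver == null || receiver == undefined) {
//     receiver = global_object->global_receiver();
//   } else if (/* receiver is a smi or not a spec object */) {
//     deoptimize();   // primitive receivers need the generic path
//   }
// ---------------------------------------------------------------------------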
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- EmitPushConstantOperand(argument);
- } else if (argument->IsRegister()) {
- __ push(ToRegister(argument));
- } else {
- ASSERT(!argument->IsDoubleRegister());
- __ push(ToOperand(argument));
- }
+ EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movq(result, rsi);
}
@@ -2279,8 +2678,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movq(result,
- Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+ Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2291,18 +2689,19 @@ void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Register global = ToRegister(instr->global());
Register result = ToRegister(instr->result());
- __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+ __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr) {
+ LInstruction* instr,
+ CallKind call_kind) {
// Change context if needed.
bool change_context =
- (graph()->info()->closure()->context() != function->context()) ||
+ (info()->closure()->context() != function->context()) ||
scope()->contains_with() ||
(scope()->num_heap_slots() > 0);
if (change_context) {
@@ -2319,7 +2718,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());
// Invoke function.
- if (*function == *graph()->info()->closure()) {
+ __ SetCallKind(rcx, call_kind);
+ if (*function == *info()->closure()) {
__ CallSelf();
} else {
__ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -2336,7 +2736,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rdi, instr->function());
- CallKnownFunction(instr->function(), instr->arity(), instr);
+ CallKnownFunction(instr->function(),
+ instr->arity(),
+ instr,
+ CALL_AS_METHOD);
}
@@ -2423,7 +2826,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(scratch, scratch);
+ __ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ andpd(input_reg, scratch);
} else if (r.IsInteger32()) {
@@ -2434,7 +2837,9 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
EmitIntegerMathAbs(instr);
+ __ Integer32ToSmi(input_reg, input_reg);
__ bind(deferred->exit());
}
}
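// ---------------------------------------------------------------------------
// Annotation (illustrative sketch, not part of this patch): the double path of
// DoMathAbs computes |x| without a branch. For nonzero finite x, 0.0 - x is x
// with its sign bit flipped, so ANDing the bit patterns of x and -x clears the
// sign bit and keeps everything else (the ±0 inputs also come out as +0):
//
//   #include <cstdint>
//   #include <cstring>
//   double abs_via_sign_bit(double x) {
//     uint64_t bits;
//     std::memcpy(&bits, &x, sizeof bits);
//     bits &= ~(uint64_t{1} << 63);          // clear the IEEE-754 sign bit
//     std::memcpy(&x, &bits, sizeof bits);
//     return x;
//   }
// ---------------------------------------------------------------------------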
@@ -2444,21 +2849,36 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(below_equal, instr->environment());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatures::Scope scope(SSE4_1);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Deoptimize if minus zero.
+ __ movq(output_reg, input_reg);
+ __ subq(output_reg, Immediate(1));
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+ __ cvttsd2si(output_reg, xmm_scratch);
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
} else {
- DeoptimizeIf(below, instr->environment());
- }
+ __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ }
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
+ }
}
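// ---------------------------------------------------------------------------
// Annotation (illustrative sketch, not part of this patch): in the SSE4.1 path
// the minus-zero check moves the raw double bits into a general register and
// subtracts 1; the only value for which that subtraction sets the overflow
// flag is INT64_MIN (0x8000000000000000), which is exactly the bit pattern of
// -0.0. The equivalent check in C++:
//
//   #include <cstdint>
//   #include <cstring>
//   bool is_minus_zero(double x) {
//     uint64_t bits;
//     std::memcpy(&bits, &x, sizeof bits);
//     return bits == 0x8000000000000000ull;
//   }
// ---------------------------------------------------------------------------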
@@ -2467,33 +2887,45 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Label done;
// xmm_scratch = 0.5
__ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
__ movq(xmm_scratch, kScratchRegister);
-
+ Label below_half;
+ __ ucomisd(xmm_scratch, input_reg);
+ // If input_reg is NaN, this doesn't jump.
+ __ j(above, &below_half, Label::kNear);
// input = input + 0.5
+  // This addition might give a result that isn't correct for rounding,
+  // due to loss of precision, but only for a number so big that the
+  // conversion below will overflow anyway.
__ addsd(input_reg, xmm_scratch);
+ // Compute Math.floor(input).
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done);
- // We need to return -0 for the input range [-0.5, 0[, otherwise
- // compute Math.floor(value + 0.5).
+ __ bind(&below_half);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below_equal, instr->environment());
+ // Bailout if negative (including -0).
+ __ movq(output_reg, input_reg);
+ __ testq(output_reg, output_reg);
+ DeoptimizeIf(negative, instr->environment());
} else {
- // If we don't need to bailout on -0, we check only bailout
- // on negative inputs.
- __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ // Bailout if below -0.5, otherwise round to (positive) zero, even
+ // if negative.
+    // xmm_scratch = -0.5
+ __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(below, instr->environment());
}
+ __ xorl(output_reg, output_reg);
- // Compute Math.floor(value + 0.5).
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
-
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ bind(&done);
}
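// ---------------------------------------------------------------------------
// Annotation (conceptual sketch, not part of this patch): DoMathRound above
// splits on 0.5 and handles, roughly:
//
//   if (x >= 0.5 /* or NaN, which then deopts on conversion */) {
//     result = truncate(x + 0.5);     // cvttsd2si; 0x80000000 means deopt
//   } else if (bailout_on_minus_zero ? /* sign bit set: negative or -0 */
//                                    : x < -0.5) {
//     deoptimize();
//   } else {
//     result = 0;
//   }
// ---------------------------------------------------------------------------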
@@ -2508,7 +2940,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ xorpd(xmm_scratch, xmm_scratch);
+ __ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
}
@@ -2524,23 +2956,24 @@ void LCodeGen::DoPower(LPower* instr) {
if (exponent_type.IsDouble()) {
__ PrepareCallCFunction(2);
// Move arguments to correct registers
- __ movsd(xmm0, left_reg);
+ __ movaps(xmm0, left_reg);
ASSERT(ToDoubleRegister(right).is(xmm1));
- __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 2);
} else if (exponent_type.IsInteger32()) {
__ PrepareCallCFunction(2);
// Move arguments to correct registers: xmm0 and edi (not rdi).
// On Windows, the registers are xmm0 and edx.
- __ movsd(xmm0, left_reg);
+ __ movaps(xmm0, left_reg);
#ifdef _WIN64
ASSERT(ToRegister(right).is(rdx));
#else
ASSERT(ToRegister(right).is(rdi));
#endif
- __ CallCFunction(ExternalReference::power_double_int_function(), 2);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(isolate()), 2);
} else {
ASSERT(exponent_type.IsTagged());
- CpuFeatures::Scope scope(SSE2);
Register right_reg = ToRegister(right);
Label non_smi, call;
@@ -2557,12 +2990,15 @@ void LCodeGen::DoPower(LPower* instr) {
__ bind(&call);
__ PrepareCallCFunction(2);
// Move arguments to correct registers xmm0 and xmm1.
- __ movsd(xmm0, left_reg);
+ __ movaps(xmm0, left_reg);
// Right argument is already in xmm1.
- __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
- __ movsd(result_reg, xmm0);
+ __ movaps(result_reg, xmm0);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2576,7 +3012,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2584,7 +3020,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2623,12 +3059,28 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
+ ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator generator(this, pointers, env->deoptimization_index());
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
+ arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2638,9 +3090,11 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ Move(rcx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, mode, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2649,7 +3103,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Drop(1);
@@ -2659,9 +3113,11 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ Move(rcx, instr->name());
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ CallCode(ic, mode, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2669,7 +3125,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Move(rdi, instr->target());
- CallKnownFunction(instr->target(), instr->arity(), instr);
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -2677,7 +3133,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
- Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ Set(rax, instr->arity());
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -2723,28 +3179,50 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
- Register external_pointer = ToRegister(instr->external_pointer());
- Register key = ToRegister(instr->key());
- Register value = ToRegister(instr->value());
-
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(value, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, value); // 1 if negative, 0 if positive.
- __ decb(value); // 0 if negative, 255 if positive.
- __ bind(&done);
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister value(ToDoubleRegister(instr->value()));
+ __ cvtsd2ss(value, value);
+ __ movss(operand, value);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(operand, ToDoubleRegister(instr->value()));
+ } else {
+ Register value(ToRegister(instr->value()));
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movb(operand, value);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movw(operand, value);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(operand, value);
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
-
- __ movb(Operand(external_pointer, key, times_1, 0), value);
}
@@ -2794,13 +3272,21 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic(Builtins::builtin(
- info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic = instr->strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
@@ -2835,7 +3321,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
- NearLabel flat_string, ascii_string, done;
+ Label flat_string, ascii_string, done;
// Fetch the instance type of the receiver into result register.
__ movq(result, FieldOperand(string, HeapObject::kMapOffset));
@@ -2844,7 +3330,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
// We need special handling for non-sequential strings.
STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
+ __ j(zero, &flat_string, Label::kNear);
// Handle cons strings and go to deferred code for the rest.
__ testb(result, Immediate(kIsConsStringMask));
@@ -2871,7 +3357,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
+ __ j(not_zero, &ascii_string, Label::kNear);
// Two-byte string.
// Load the two-byte character code into the result register.
@@ -2887,7 +3373,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
times_2,
SeqTwoByteString::kHeaderSize));
}
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// ASCII string.
// Load the byte into the result register.
@@ -2937,6 +3423,53 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
}
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+ ASSERT(!char_code.is(result));
+
+ __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
+ __ j(above, deferred->entry());
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ movq(result, FieldOperand(result,
+ char_code, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ j(equal, deferred->entry());
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ PushSafepointRegistersScope scope(this);
+ __ Integer32ToSmi(char_code, char_code);
+ __ push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
void LCodeGen::DoStringLength(LStringLength* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
@@ -3029,29 +3562,35 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
+ bool deoptimize_on_undefined,
LEnvironment* env) {
- NearLabel load_smi, heap_number, done;
+ Label load_smi, done;
// Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(equal, &heap_number);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number;
+ __ j(equal, &heap_number, Label::kNear);
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorpd(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done);
+ // Convert undefined to NaN. Compute NaN as 0/0.
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ __ bind(&heap_number);
+ }
// Heap number to XMM conversion.
- __ bind(&heap_number);
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Smi to XMM conversion
__ bind(&load_smi);
@@ -3072,7 +3611,7 @@ class DeferredTaggedToI: public LDeferredCode {
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- NearLabel done, heap_number;
+ Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
// Heap number map check.
@@ -3080,20 +3619,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Heap::kHeapNumberMapRootIndex);
if (instr->truncating()) {
- __ j(equal, &heap_number);
+ __ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr->environment());
- __ movl(input_reg, Immediate(0));
- __ jmp(&done);
+ __ Set(input_reg, 0);
+ __ jmp(&done, Label::kNear);
__ bind(&heap_number);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2siq(input_reg, xmm0);
__ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpl(input_reg, kScratchRegister);
+ __ cmpq(input_reg, kScratchRegister);
DeoptimizeIf(equal, instr->environment());
} else {
// Deoptimize if we don't have a heap number.
@@ -3140,7 +3679,9 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg, instr->environment());
+ EmitNumberUntagD(input_reg, result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ instr->environment());
}
@@ -3158,8 +3699,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
// the JS bitwise operations.
__ cvttsd2siq(result_reg, input_reg);
__ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
- __ cmpl(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ cmpq(result_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
} else {
__ cvttsd2si(result_reg, input_reg);
__ cvtlsi2sd(xmm0, result_reg);
@@ -3167,11 +3708,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- NearLabel done;
+ Label done;
// The integer converted back is equal to the original. We
// only have to test if we got -0 as an input.
__ testl(result_reg, result_reg);
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
__ movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
@@ -3186,41 +3727,59 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
Condition cc = masm()->CheckSmi(ToRegister(input));
- if (instr->condition() != equal) {
- cc = NegateCondition(cc);
- }
+ DeoptimizeIf(NegateCondition(cc), instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->InputAt(0));
- InstanceType first = instr->hydrogen()->first();
- InstanceType last = instr->hydrogen()->last();
__ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
- // If there is only one type in the interval check for equality.
- if (first == last) {
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first;
+ InstanceType last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(first)));
- DeoptimizeIf(not_equal, instr->environment());
- } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
- // String has a dedicated bit in instance type.
- __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(kIsNotStringMask));
- DeoptimizeIf(not_zero, instr->environment());
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(last)));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
} else {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT(tag == 0 || IsPowerOf2(tag));
+ __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(mask));
+ DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+ } else {
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ andb(kScratchRegister, Immediate(mask));
+ __ cmpb(kScratchRegister, Immediate(tag));
+ DeoptimizeIf(not_equal, instr->environment());
}
}
}
@@ -3244,10 +3803,61 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register temp_reg = ToRegister(instr->TempAt(0));
+ __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register value_reg = ToRegister(instr->result());
+ __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ ASSERT(instr->unclamped()->Equals(instr->result()));
+ Register input_reg = ToRegister(instr->unclamped());
+ Register temp_reg = ToRegister(instr->TempAt(0));
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
+ Label is_smi, done, heap_number;
+
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ Cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movq(input_reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+
+ // Heap number
+ __ bind(&heap_number);
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+ __ jmp(&done, Label::kNear);
+
+ // smi
+ __ bind(&is_smi);
+ __ SmiToInteger32(input_reg, input_reg);
+ __ ClampUint8(input_reg);
+
+ __ bind(&done);
+}
+
+
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (Heap::InNewSpace(*object)) {
+ if (heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(object);
+ factory()->NewJSGlobalPropertyCell(object);
__ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
} else {
@@ -3328,8 +3938,15 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
}
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ __ push(rax);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- NearLabel materialized;
+ Label materialized;
// Registers will be used as follows:
// rdi = JS function.
// rcx = literals array.
@@ -3341,7 +3958,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
instr->hydrogen()->literal_index() * kPointerSize;
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized);
+ __ j(not_equal, &materialized, Label::kNear);
// Create regexp literal using runtime function
// Result will be in rax.
@@ -3385,14 +4002,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
- if (shared_info->num_literals() == 0 && !pretenure) {
- FastNewClosureStub stub;
+ if (!pretenure && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(
+ shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ Push(shared_info);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
__ Push(shared_info);
- __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ __ PushRoot(pretenure ?
+ Heap::kTrueValueRootIndex :
+ Heap::kFalseValueRootIndex);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
@@ -3400,14 +4020,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->InputAt(0);
- if (input->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(input)));
- } else if (input->IsRegister()) {
- __ push(ToRegister(input));
- } else {
- ASSERT(input->IsStackSlot());
- __ push(ToOperand(input));
- }
+ EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -3417,7 +4030,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
- NearLabel done;
+ Label done;
Condition final_branch_condition = EmitTypeofIs(&true_label,
&false_label,
@@ -3426,7 +4039,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
__ j(final_branch_condition, &true_label);
__ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
@@ -3435,19 +4048,14 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
}
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
- ASSERT(operand->IsConstantOperand());
- LConstantOperand* const_op = LConstantOperand::cast(operand);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ push(Immediate(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ __ Push(ToHandle(LConstantOperand::cast(operand)));
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
} else {
- ASSERT(r.IsTagged());
- __ Push(literal);
+ __ push(ToOperand(operand));
}
}
@@ -3473,28 +4081,28 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(Heap::number_symbol())) {
+ if (type_name->Equals(heap()->number_symbol())) {
__ JumpIfSmi(input, true_label);
- __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::string_symbol())) {
+ } else if (type_name->Equals(heap()->string_symbol())) {
__ JumpIfSmi(input, false_label);
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+ __ j(above_equal, false_label);
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
- final_branch_condition = below;
+ final_branch_condition = zero;
- } else if (type_name->Equals(Heap::boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_symbol())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(Heap::undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_symbol())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
@@ -3504,24 +4112,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(Heap::function_symbol())) {
+ } else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
final_branch_condition = above_equal;
- } else if (type_name->Equals(Heap::object_symbol())) {
+ } else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
- __ Cmp(input, Factory::null_value());
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label);
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label);
// Check for undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- // Check for JS objects that are not RegExp or Function => true.
- __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
- __ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- final_branch_condition = below_equal;
+ final_branch_condition = zero;
} else {
final_branch_condition = never;
@@ -3534,15 +4141,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
- NearLabel done;
+ Label true_label;
+ Label done;
EmitIsConstructCall(result);
- __ j(equal, &true_label);
+ __ j(equal, &true_label, Label::kNear);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
@@ -3567,16 +4173,16 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
__ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
- __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
+ Label check_frame_marker;
+ __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker, Label::kNear);
__ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
- __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
}
@@ -3594,20 +4200,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
- // Push object.
- if (obj->IsRegister()) {
- __ push(ToRegister(obj));
- } else {
- __ push(ToOperand(obj));
- }
- // Push key.
- if (key->IsConstantOperand()) {
- EmitPushConstantOperand(key);
- } else if (key->IsRegister()) {
- __ push(ToRegister(key));
- } else {
- __ push(ToOperand(key));
- }
+ EmitPushTaggedOperand(obj);
+ EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
LEnvironment* env = instr->deoptimization_environment();
@@ -3620,19 +4214,64 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
pointers,
env->deoptimization_index());
__ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+ LOperand* obj = instr->object();
+ LOperand* key = instr->key();
+ EmitPushTaggedOperand(key);
+ EmitPushTaggedOperand(obj);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ // Create safepoint generator that will also ensure enough space in the
+ // reloc info for patching in deoptimization (since this is invoking a
+ // builtin)
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this);
+ CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- NearLabel done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &done, Label::kNear);
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(below, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 88832faf79..feacc2c82e 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -56,7 +56,7 @@ class LCodeGen BASE_EMBEDDED {
jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
- scope_(chunk->graph()->info()->scope()),
+ scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
@@ -67,6 +67,10 @@ class LCodeGen BASE_EMBEDDED {
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
@@ -77,7 +81,6 @@ class LCodeGen BASE_EMBEDDED {
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
-
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -91,12 +94,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -120,7 +126,7 @@ class LCodeGen BASE_EMBEDDED {
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
- return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@@ -136,8 +142,8 @@ class LCodeGen BASE_EMBEDDED {
Register input,
Register temporary);
- int StackSlotCount() const { return chunk()->spill_slot_count(); }
- int ParameterCount() const { return scope()->num_parameters(); }
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@@ -168,14 +174,14 @@ class LCodeGen BASE_EMBEDDED {
RelocInfo::Mode mode,
LInstruction* instr);
- void CallRuntime(Runtime::Function* function,
+ void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
@@ -188,7 +194,8 @@ class LCodeGen BASE_EMBEDDED {
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
- LInstruction* instr);
+ LInstruction* instr,
+ CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
@@ -208,6 +215,10 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ Operand BuildExternalArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -231,12 +242,18 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
- void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+ void EmitNumberUntagD(Register input,
+ XMMRegister result,
+ bool deoptimize_on_undefined,
+ LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -255,15 +272,21 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- // Emits code for pushing a constant operand.
- void EmitPushConstantOperand(LOperand* operand);
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name);
+
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
struct JumpTableEntry {
- inline JumpTableEntry(Address address)
- : label_(),
- address_(address) { }
- Label label_;
- Address address_;
+ explicit inline JumpTableEntry(Address entry)
+ : label(),
+ address(entry) { }
+ Label label;
+ Address address;
};
LChunk* const chunk_;
@@ -274,7 +297,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry*> jump_table_;
+ ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index cedd0256dd..c3c617c456 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -214,7 +214,7 @@ void LGapResolver::EmitMove(int index) {
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
+ __ movaps(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ movsd(cgen_->ToOperand(destination), src);
@@ -273,9 +273,9 @@ void LGapResolver::EmitSwap(int index) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movsd(xmm0, source_reg);
- __ movsd(source_reg, destination_reg);
- __ movsd(destination_reg, xmm0);
+ __ movaps(xmm0, source_reg);
+ __ movaps(source_reg, destination_reg);
+ __ movaps(destination_reg, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 2f413feb9d..42e60c3d90 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -71,22 +71,21 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as
- // temporaries and outputs because all registers
- // are blocked by the calling convention.
- // Inputs must use a fixed register.
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+ // a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LOperand* operand = it.Next();
- ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
- !LUnallocated::cast(operand)->HasRegisterPolicy());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
@@ -114,21 +113,18 @@ void LInstruction::PrintTo(StringStream* stream) {
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -240,6 +236,13 @@ void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -303,6 +306,13 @@ void LStoreContextSlot::PrintDataTo(StringStream* stream) {
}
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[rcx] #%d / ", arity());
}
@@ -380,8 +390,7 @@ void LChunk::MarkEmptyBlocks() {
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -442,7 +451,7 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LGap* gap = new LGap(block);
+ LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -470,7 +479,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - graph()->info()->scope()->num_parameters() - 1;
+ int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
@@ -478,7 +487,7 @@ int LChunk::GetParameterStackSlot(int index) const {
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + graph()->info()->scope()->num_parameters() - index) *
+ return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
@@ -517,7 +526,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(graph());
+ chunk_ = new LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -534,8 +543,8 @@ LChunk* LChunkBuilder::Build() {
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -790,6 +799,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -845,24 +859,22 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseFixed(right_value, rcx);
}
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- bool can_deopt = (op == Token::SHR && constant_value == 0);
- if (can_deopt) {
- bool can_truncate = true;
- for (int i = 0; i < instr->uses()->length(); i++) {
- if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
- can_truncate = false;
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ bool does_deopt = false;
+ if (may_deopt) {
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+ does_deopt = true;
break;
}
}
- can_deopt = !can_truncate;
}
- LShiftI* result = new LShiftI(op, left, right, can_deopt);
- return can_deopt
- ? AssignEnvironment(DefineSameAsFirst(result))
- : DefineSameAsFirst(result);
+ LInstruction* result =
+ DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
}
@@ -871,9 +883,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- if (op == Token::MOD) {
- Abort("Unimplemented: %s", "DoArithmeticD MOD");
- }
+ ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
@@ -1008,6 +1018,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
+ if (hydrogen_env->is_special_index(i)) continue;
+
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
@@ -1025,108 +1037,85 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
- if (v->EmitAtUses()) {
- if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
-
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
- LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
- right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
-
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
- UseFixed(instance_of->right(), rdx));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
+ if (!v->EmitAtUses()) return new LBranch(UseRegisterAtStart(v));
+ ASSERT(!v->HasSideEffects());
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
} else {
- if (v->IsConstant()) {
- if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(instr->FirstSuccessor()->block_id());
- } else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(instr->SecondSuccessor()->block_id());
- }
- }
- Abort("Undefined compare before branch");
- return NULL;
+ ASSERT(r.IsDouble());
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
}
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareConstantEq()) {
+ HCompareConstantEq* compare = HCompareConstantEq::cast(v);
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
+ } else if (v->IsConstant()) {
+ HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ ? instr->FirstSuccessor()
+ : instr->SecondSuccessor();
+ return new LGoto(successor->block_id());
+ } else {
+ Abort("Undefined compare before branch");
+ return NULL;
}
- return new LBranch(UseRegisterAtStart(v));
}
@@ -1158,7 +1147,8 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax));
+ new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax),
+ FixedTemp(rdi));
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1183,8 +1173,13 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1200,7 +1195,8 @@ LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- return DefineAsRegister(new LGlobalReceiver);
+ LOperand* global_object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LGlobalReceiver(global_object));
}
@@ -1211,6 +1207,14 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
}
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
+ argument_count_ -= instr->argument_count();
+ LInvokeFunction* result = new LInvokeFunction(function);
+ return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1348,13 +1352,23 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new LModI(value, divisor, temp);
- LInstruction* result = DefineFixed(mod, rdx);
+
+ LInstruction* result;
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
+ result = DefineSameAsFirst(mod);
+ } else {
+ // The temporary operand is necessary to ensure that right is not
+      // allocated into rdx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* value = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LModI* mod = new LModI(value, divisor, temp);
+ result = DefineFixed(mod, rdx);
+ }
+
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero))
? AssignEnvironment(result)
@@ -1366,8 +1380,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
// We call a C function for double modulo. It can't trigger a GC.
// We need to use fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
- LOperand* right = UseFixedDouble(instr->right(), xmm2);
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
+ LOperand* right = UseFixedDouble(instr->right(), xmm1);
LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
@@ -1481,15 +1495,21 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoCompareConstantEq(
+ HCompareConstantEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LCmpConstantEq(left));
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1514,6 +1534,14 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1524,8 +1552,10 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
- Abort("Unimplemented: %s", "DoGetCachedArrayIndex");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
}
@@ -1555,9 +1585,16 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
}
-LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+LInstruction* LChunkBuilder::DoExternalArrayLength(
+ HExternalArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LPixelArrayLength(array));
+ return DefineAsRegister(new LExternalArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
}
@@ -1587,6 +1624,19 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
@@ -1600,10 +1650,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
- LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
+ bool truncating = instr->CanTruncateToInt32();
+ LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
@@ -1646,7 +1694,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, zero));
+ return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1666,7 +1714,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, not_zero));
+ return AssignEnvironment(new LCheckSmi(value));
}
@@ -1683,6 +1731,53 @@ LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
}
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new LClampDToUint8(reg,
+ TempRegister()));
+ } else if (input_rep.IsInteger32()) {
+ return DefineSameAsFirst(new LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsTagged());
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LClampTToUint8* result = new LClampTToUint8(reg,
+ TempRegister(),
+ FixedTemp(xmm1));
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseRegister(value);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* xmm_temp =
+ CpuFeatures::IsSupported(SSE3)
+ ? NULL
+ : FixedTemp(xmm1);
+ return AssignEnvironment(
+ DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), rax));
}
@@ -1704,21 +1799,36 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
- TempRegister());
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), rax);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LStoreGlobalCell* result =
+ new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* global_object = UseFixed(instr->global_object(), rdx);
+ LOperand* value = UseFixed(instr->value(), rax);
+ LStoreGlobalGeneric* result = new LStoreGlobalGeneric(global_object, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
@@ -1726,17 +1836,19 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- Abort("Unimplemented: DoStoreContextSlot"); // Temporarily disabled (whesse).
LOperand* context;
LOperand* value;
+ LOperand* temp;
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
+ temp = TempRegister();
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
+ temp = NULL;
}
- return new LStoreContextSlot(context, value);
+ return new LStoreContextSlot(context, value, temp);
}
@@ -1747,6 +1859,21 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), rax);
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), rax);
LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
@@ -1767,10 +1894,10 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
- HLoadPixelArrayExternalPointer* instr) {
+LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
+ HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
+ return DefineAsRegister(new LLoadExternalArrayPointer(input));
}
@@ -1785,16 +1912,27 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
- HLoadPixelArrayElement* instr) {
- ASSERT(instr->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Representation representation(instr->representation());
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
- LOperand* external_pointer =
- UseRegisterAtStart(instr->external_pointer());
- LOperand* key = UseRegisterAtStart(instr->key());
- LLoadPixelArrayElement* result =
- new LLoadPixelArrayElement(external_pointer, key);
- return DefineSameAsFirst(result);
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
+ // An unsigned int array load might overflow and cause a deopt; make sure it
+ // has an environment.
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
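(Aside on the deopt comment above - a minimal sketch, not V8 code, of why the
EXTERNAL_UNSIGNED_INT case is special: the tagged int32 fast path can only
represent values up to INT32_MAX, so larger unsigned elements force a bailout.)

  #include <stdint.h>

  // True when a uint32 element fits the optimized int32 result path.
  static inline bool FitsInt32FastPath(uint32_t element) {
    return element <= 0x7FFFFFFFu;  // INT32_MAX; otherwise deoptimize
  }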
@@ -1826,17 +1964,32 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
-LInstruction* LChunkBuilder::DoStorePixelArrayElement(
- HStorePixelArrayElement* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ Representation representation(instr->value()->representation());
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegister(instr->key());
+ bool val_is_temp_register =
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstant(instr->key());
- return new LStorePixelArrayElement(external_pointer, key, val);
+ return new LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -1883,6 +2036,13 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* left = UseOrConstantAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1891,6 +2051,13 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
}
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LStringLength(string));
@@ -1919,7 +2086,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LDeleteProperty* result =
- new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+ new LDeleteProperty(UseAtStart(instr->object()),
+ UseOrConstantAtStart(instr->key()));
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1971,6 +2139,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
}
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), rax);
+ LToFastProperties* result = new LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, rax), instr);
@@ -2002,7 +2177,6 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->Push(value);
}
}
- ASSERT(env->length() == instr->environment_length());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@@ -2020,7 +2194,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
@@ -2029,8 +2208,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- false,
- undefined);
+ undefined,
+ instr->call_kind());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2043,6 +2222,15 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
return NULL;
}
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* key = UseOrConstantAtStart(instr->key());
+ LOperand* object = UseOrConstantAtStart(instr->object());
+ LIn* result = new LIn(key, object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index fed5b8cb88..d500dfd879 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -70,15 +71,22 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
+ V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
+ V(CmpConstantEq) \
+ V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -87,41 +95,49 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
+ V(ExternalArrayLength) \
+ V(FixedArrayLength) \
V(FunctionLiteral) \
- V(Gap) \
+ V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(FixedArrayLength) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
+ V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
- V(HasInstanceType) \
- V(HasInstanceTypeAndBranch) \
- V(HasCachedArrayIndex) \
- V(HasCachedArrayIndexAndBranch) \
- V(ClassOfTest) \
- V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
- V(LoadGlobal) \
+ V(LoadExternalArrayPointer) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
- V(LoadPixelArrayElement) \
- V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -131,7 +147,6 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
- V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -141,41 +156,40 @@ class LCodeGen;
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
- V(StoreGlobal) \
+ V(StoreGlobalCell) \
+ V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
- V(StorePixelArrayElement) \
+ V(StringAdd) \
V(StringCharCodeAt) \
+ V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
+ V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
-#define DECLARE_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const { return LInstruction::k##type; } \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- DECLARE_INSTRUCTION(type)
-
-
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
@@ -198,10 +212,25 @@ class LInstruction: public ZoneObject {
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
- // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
- LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
@@ -259,37 +288,6 @@ class LInstruction: public ZoneObject {
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -312,9 +310,9 @@ class LTemplateInstruction: public LInstruction {
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
@@ -328,8 +326,13 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
parallel_moves_[AFTER] = NULL;
}
- DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const { return true; }
virtual void PrintDataTo(StringStream* stream);
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
bool IsRedundant() const;
@@ -359,21 +362,26 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
+class LInstructionGap: public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -447,7 +455,6 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- DECLARE_INSTRUCTION(ControlInstruction)
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@@ -608,26 +615,49 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+};
+
+
+class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCmpConstantEq(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
+};
+
+
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
+ inputs_[0] = left;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
@@ -705,6 +735,31 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
@@ -730,6 +785,17 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
};
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
@@ -796,17 +862,17 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
+class LIn: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LIn(LOperand* key, LOperand* object) {
+ inputs_[0] = key;
+ inputs_[1] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ LOperand* key() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
- Token::Value op() const { return hydrogen()->token(); }
+ DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
@@ -821,21 +887,11 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LInstanceOfKnownGlobal(LOperand* value) {
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -984,14 +1040,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
-class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LPixelArrayLength(LOperand* value) {
+ explicit LExternalArrayLength(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
- DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+ DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
};
@@ -1006,6 +1062,17 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
@@ -1071,6 +1138,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
+ virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1087,6 +1155,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1118,6 +1187,19 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+
+ LOperand* object() { return inputs_[0]; }
+};
+
+
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@@ -1155,14 +1237,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
- "load-pixel-array-external-pointer")
+ DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
+ "load-external-array-pointer")
};
@@ -1181,19 +1263,23 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
- "load-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1211,22 +1297,55 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LStoreGlobal(LOperand* value, LOperand* temp) {
+ explicit LLoadGlobalGeneric(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* global_object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LStoreGlobalGeneric(LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = global_object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+ LOperand* global_object() { return InputAt(0); }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ LOperand* value() { return InputAt(1); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1246,11 +1365,12 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
inputs_[1] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
@@ -1275,6 +1395,11 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1299,9 +1424,15 @@ class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalReceiver(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+
+ LOperand* global() { return InputAt(0); }
};
@@ -1317,6 +1448,23 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInvokeFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ LOperand* function() { return inputs_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
@@ -1401,7 +1549,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- Runtime::Function* function() const { return hydrogen()->function(); }
+ const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1445,7 +1593,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1460,7 +1608,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1483,6 +1631,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change);
};
@@ -1541,6 +1690,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -1564,23 +1714,26 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
};
-class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStorePixelArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
- "store-pixel-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
};
@@ -1593,12 +1746,29 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringAdd(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
};
@@ -1617,6 +1787,19 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
+class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringCharFromCode(LOperand* char_code) {
+ inputs_[0] = char_code;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+
+ LOperand* char_code() { return inputs_[0]; }
+};
+
+
class LStringLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringLength(LOperand* string) {
@@ -1679,20 +1862,62 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* value, Condition condition)
- : condition_(condition) {
+ explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
- Condition condition() const { return condition_; }
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const {
- return (condition_ == zero) ? "check-non-smi" : "check-smi";
+
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClampDToUint8(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
}
- private:
- Condition condition_;
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LClampTToUint8(LOperand* value,
+ LOperand* temp,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
@@ -1726,6 +1951,17 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};
+class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
@@ -1824,14 +2060,21 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(HGraph* graph)
+ explicit LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
@@ -1848,6 +2091,7 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
@@ -1884,6 +2128,7 @@ class LChunk: public ZoneObject {
private:
int spill_slot_count_;
+ CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
@@ -1893,8 +2138,9 @@ class LChunk: public ZoneObject {
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(HGraph* graph, LAllocator* allocator)
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
+ info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
@@ -1923,6 +2169,7 @@ class LChunkBuilder BASE_EMBEDDED {
};
LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
@@ -2029,6 +2276,7 @@ class LChunkBuilder BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
+ CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
@@ -2044,7 +2292,6 @@ class LChunkBuilder BASE_EMBEDDED {
};
#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 8845bbb77c..7c8a3667e3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
@@ -40,36 +40,156 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ root_array_available_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
+ Address roots_register_value = kRootRegisterBias +
+ reinterpret_cast<Address>(isolate->heap()->roots_address());
+ intptr_t delta = other.address() - roots_register_value;
+ return delta;
+}
+
+
+Operand MacroAssembler::ExternalOperand(ExternalReference target,
+ Register scratch) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(target, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ return Operand(kRootRegister, static_cast<int32_t>(delta));
+ }
+ }
+ movq(scratch, target);
+ return Operand(scratch, 0);
+}
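(A hypothetical, self-contained model of the root-register-relative trick used
by ExternalOperand and by Load/Store/LoadAddress below: a target within a
signed 32-bit offset of the value kept in kRootRegister can be reached with a
register+displacement operand instead of first materializing a 64-bit
immediate in a scratch register.)

  #include <stdint.h>

  static inline bool FitsRootRelative(int64_t target,
                                      int64_t root_register_value,
                                      int32_t* delta_out) {
    int64_t delta = target - root_register_value;
    // Same condition as is_int32(delta) in the code above.
    if (static_cast<int64_t>(static_cast<int32_t>(delta)) != delta) {
      return false;
    }
    *delta_out = static_cast<int32_t>(delta);
    return true;
  }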
+
+
+void MacroAssembler::Load(Register destination, ExternalReference source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(source, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+ // Safe code.
+ if (destination.is(rax)) {
+ load_rax(source);
+ } else {
+ movq(kScratchRegister, source);
+ movq(destination, Operand(kScratchRegister, 0));
+ }
+}
+
+
+void MacroAssembler::Store(ExternalReference destination, Register source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(destination, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ return;
+ }
+ }
+ // Safe code.
+ if (source.is(rax)) {
+ store_rax(destination);
+ } else {
+ movq(kScratchRegister, destination);
+ movq(Operand(kScratchRegister, 0), source);
+ }
+}
+
+
+void MacroAssembler::LoadAddress(Register destination,
+ ExternalReference source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ intptr_t delta = RootRegisterDelta(source, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+ // Safe code.
+ movq(destination, source);
+}
+
+
+int MacroAssembler::LoadAddressSize(ExternalReference source) {
+ if (root_array_available_ && !Serializer::enabled()) {
+ // This calculation depends on the internals of LoadAddress.
+ // Its correctness is ensured by the asserts in the Call
+ // instruction below.
+ intptr_t delta = RootRegisterDelta(source, isolate());
+ if (is_int32(delta)) {
+ Serializer::TooLateToEnableNow();
+ // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
+ int size = 4;
+ if (!is_int8(static_cast<int32_t>(delta))) {
+ size += 3; // Need full four-byte displacement in lea.
+ }
+ return size;
+ }
+ }
+ // Size of movq(destination, src);
+ return 10;
}
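(Worked size breakdown for the constants above, assuming the usual x86-64
encodings and a base register such as r13 that needs no SIB byte:
lea reg, [base + disp8] is REX.W(1) + 0x8D(1) + ModRM(1) + disp8(1) = 4 bytes;
a 32-bit displacement replaces the last byte with four, giving 7; and
movq reg, imm64 is REX.W(1) + (0xB8+rd)(1) + imm64(8) = 10 bytes.)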
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
+ ASSERT(root_array_available_);
+ movq(destination, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
+}
+
+
+void MacroAssembler::LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset) {
+ ASSERT(root_array_available_);
+ movq(destination,
+ Operand(kRootRegister,
+ variable_offset, times_pointer_size,
+ (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
+ ASSERT(root_array_available_);
+ movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- push(Operand(kRootRegister, index << kPointerSizeLog2));
+ ASSERT(root_array_available_);
+ push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
+ ASSERT(root_array_available_);
+ cmpq(with, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
+ ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
@@ -79,10 +199,10 @@ void MacroAssembler::CompareRoot(const Operand& with,
void MacroAssembler::RecordWriteHelper(Register object,
Register addr,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
- NearLabel not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space);
+ Label not_in_new_space;
+ InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
@@ -101,6 +221,42 @@ void MacroAssembler::RecordWriteHelper(Register object,
}
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance near_jump) {
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ if (scratch.is(object)) {
+ movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+ and_(scratch, kScratchRegister);
+ } else {
+ movq(scratch, ExternalReference::new_space_mask(isolate()));
+ and_(scratch, object);
+ }
+ movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+ cmpq(scratch, kScratchRegister);
+ j(cc, branch, near_jump);
+ } else {
+ ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ intptr_t new_space_start =
+ reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ if (scratch.is(object)) {
+ addq(scratch, kScratchRegister);
+ } else {
+ lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ }
+ and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ j(cc, branch, near_jump);
+ }
+}
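(A rough scalar model of the check both branches of InNewSpace implement,
assuming - as the masking relies on - that the new space is a single region
aligned to its power-of-two size; illustrative only:)

  #include <stdint.h>

  static inline bool InNewSpaceModel(uintptr_t addr,
                                     uintptr_t new_space_start,
                                     uintptr_t new_space_mask) {
    // new_space_mask keeps only the bits above the region size, so masking
    // any address inside the region yields exactly the region start.
    return (addr & new_space_mask) == new_space_start;
  }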
+
+
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@@ -111,7 +267,7 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
+ // catch stores of smis and stores into the young generation.
Label done;
JumpIfSmi(value, &done);
@@ -123,7 +279,7 @@ void MacroAssembler::RecordWrite(Register object,
// clobbering done inside RecordWriteNonSmi but it's necessary to
// avoid having the fast case for smis leave the registers
// unchanged.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -140,7 +296,7 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
+ // catch stores of smis and stores into the young generation.
Label done;
JumpIfSmi(value, &done);
@@ -152,7 +308,7 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -166,9 +322,9 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
Register index) {
Label done;
- if (FLAG_debug_code) {
- NearLabel okay;
- JumpIfNotSmi(object, &okay);
+ if (emit_debug_code()) {
+ Label okay;
+ JumpIfNotSmi(object, &okay, Label::kNear);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay);
@@ -210,7 +366,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -218,19 +374,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
}
void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (FLAG_debug_code) Check(cc, msg);
+ if (emit_debug_code()) Check(cc, msg);
}
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
- NearLabel ok;
+ if (emit_debug_code()) {
+ Label ok;
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
- j(equal, &ok);
+ j(equal, &ok, Label::kNear);
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
- j(equal, &ok);
+ j(equal, &ok, Label::kNear);
Abort("JSObject with fast elements map has slow elements");
bind(&ok);
}
@@ -238,8 +394,8 @@ void MacroAssembler::AssertFastElements(Register elements) {
void MacroAssembler::Check(Condition cc, const char* msg) {
- NearLabel L;
- j(cc, &L);
+ Label L;
+ j(cc, &L, Label::kNear);
Abort(msg);
// will not return here
bind(&L);
@@ -251,9 +407,9 @@ void MacroAssembler::CheckStackAlignment() {
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
- NearLabel alignment_as_expected;
+ Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
+ j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
bind(&alignment_as_expected);
@@ -264,9 +420,9 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
- NearLabel ok;
+ Label ok;
testl(result, result);
- j(not_zero, &ok);
+ j(not_zero, &ok, Label::kNear);
testl(op, op);
j(sign, then_label);
bind(&ok);
@@ -305,9 +461,9 @@ void MacroAssembler::Abort(const char* msg) {
}
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -378,9 +534,9 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
- movq(rbx, ExternalReference(function));
+ LoadAddress(rbx, ExternalReference(function, isolate()));
CEntryStub ces(1);
ces.SaveDoubles();
CallStub(&ces);
@@ -393,7 +549,8 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -407,19 +564,19 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// should remove this need and make the runtime routine entry code
// smarter.
Set(rax, num_arguments);
- movq(rbx, ExternalReference(f));
+ LoadAddress(rbx, ExternalReference(f, isolate()));
CEntryStub ces(f->result_size);
CallStub(&ces);
}
-MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
int num_arguments) {
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
// Since we did not call the stub, there was no allocation failure.
// Return some non-failure object.
- return Heap::undefined_value();
+ return HEAP->undefined_value();
}
// TODO(1236192): Most runtime routines don't need the number of
@@ -427,7 +584,7 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
// should remove this need and make the runtime routine entry code
// smarter.
Set(rax, num_arguments);
- movq(rbx, ExternalReference(f));
+ LoadAddress(rbx, ExternalReference(f, isolate()));
CEntryStub ces(f->result_size);
return TryCallStub(&ces);
}
@@ -436,7 +593,7 @@ MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
Set(rax, num_arguments);
- movq(rbx, ext);
+ LoadAddress(rbx, ext);
CEntryStub stub(1);
CallStub(&stub);
@@ -483,14 +640,16 @@ MaybeObject* MacroAssembler::TryTailCallExternalReference(
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- return TryTailCallExternalReference(ExternalReference(fid),
+ return TryTailCallExternalReference(ExternalReference(fid, isolate()),
num_arguments,
result_size);
}
@@ -527,6 +686,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
Label leave_exit_frame;
Label write_back;
+ Factory* factory = isolate()->factory();
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
const int kNextOffset = 0;
@@ -537,12 +697,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
ExternalReference::handle_scope_level_address(),
next_address);
ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
+ ExternalReference::scheduled_exception_address(isolate());
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
- Register base_reg = r12;
+ Register base_reg = r15;
movq(base_reg, next_address);
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
@@ -574,7 +734,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
// Check if the function scheduled an exception.
movq(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), Factory::the_hole_value());
+ Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
LeaveApiExitFrame();
@@ -589,14 +749,20 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
bind(&empty_result);
// It was zero; the result is undefined.
- Move(rax, Factory::undefined_value());
+ Move(rax, factory->undefined_value());
jmp(&prologue);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
movq(prev_limit_reg, rax);
- movq(rax, ExternalReference::delete_handle_scope_extensions());
+#ifdef _WIN64
+ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+ LoadAddress(rax,
+ ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
movq(rax, prev_limit_reg);
jmp(&leave_exit_frame);
@@ -608,7 +774,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
- movq(rbx, ext);
+ LoadAddress(rbx, ext);
CEntryStub ces(result_size);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -617,7 +783,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
MaybeObject* MacroAssembler::TryJumpToExternalReference(
const ExternalReference& ext, int result_size) {
// Set the entry point and jump to the C entry runtime stub.
- movq(rbx, ext);
+ LoadAddress(rbx, ext);
CEntryStub ces(result_size);
return TryTailCallStub(&ces);
}
@@ -625,7 +791,7 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@@ -634,7 +800,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, post_call_generator);
+ InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
}
@@ -659,10 +825,10 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
movl(dst, Immediate(static_cast<uint32_t>(x)));
+ } else if (is_int32(x)) {
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
movq(dst, x, RelocInfo::NONE);
}
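(Note on the reordered checks above: for values in [0, 2^31) both branches
produce the same 64-bit result, because movl zero-extends into the upper half;
testing is_uint32 first just picks the shorter encoding - movl r32, imm32 is
5 bytes (6 with a REX prefix) versus 7 for the REX.W-prefixed movq r64, imm32 -
which is presumably the point of the change.)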
@@ -672,7 +838,7 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(kScratchRegister, x, RelocInfo::NONE);
+ Set(kScratchRegister, x);
movq(dst, kScratchRegister);
}
}
@@ -694,7 +860,7 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
}
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(dst,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
RelocInfo::NONE);
@@ -702,17 +868,17 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (allow_stub_calls()) {
Assert(equal, "Uninitialized kSmiConstantRegister");
} else {
- NearLabel ok;
- j(equal, &ok);
+ Label ok;
+ j(equal, &ok, Label::kNear);
int3();
bind(&ok);
}
}
- if (source->value() == 0) {
+ int value = source->value();
+ if (value == 0) {
xorl(dst, dst);
return;
}
- int value = source->value();
bool negative = value < 0;
unsigned int uvalue = negative ? -value : value;
@@ -763,10 +929,10 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
testb(dst, Immediate(0x01));
- NearLabel ok;
- j(zero, &ok);
+ Label ok;
+ j(zero, &ok, Label::kNear);
if (allow_stub_calls()) {
Abort("Integer32ToSmiField writing to non-smi location");
} else {
@@ -783,9 +949,9 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
Register src,
int constant) {
if (dst.is(src)) {
- addq(dst, Immediate(constant));
+ addl(dst, Immediate(constant));
} else {
- lea(dst, Operand(src, constant));
+ leal(dst, Operand(src, constant));
}
shl(dst, Immediate(kSmiShift));
}
@@ -824,12 +990,24 @@ void MacroAssembler::SmiTest(Register src) {
}
-void MacroAssembler::SmiCompare(Register dst, Register src) {
- cmpq(dst, src);
+void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(smi1);
+ AbortIfNotSmi(smi2);
+ }
+ cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ }
+ Cmp(dst, src);
+}
+
+
+void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
testq(dst, dst);
@@ -841,20 +1019,39 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ AbortIfNotSmi(src);
+ }
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ AbortIfNotSmi(src);
+ }
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+ if (emit_debug_code()) {
+ AbortIfNotSmi(dst);
+ }
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
+void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
+ // The Operand cannot use the smi register.
+ Register smi_reg = GetSmiConstant(src);
+ ASSERT(!dst.AddressUsesRegister(smi_reg));
+ cmpq(dst, smi_reg);
+}
+
+
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}
@@ -892,6 +1089,24 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
}
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+ Label* on_not_smis,
+ Label::Distance near_jump) {
+ if (dst.is(src1) || dst.is(src2)) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ movq(kScratchRegister, src1);
+ or_(kScratchRegister, src2);
+ JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ or_(dst, src2);
+ JumpIfNotSmi(dst, on_not_smis, near_jump);
+ }
+}
+
+
Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
@@ -908,7 +1123,7 @@ Condition MacroAssembler::CheckSmi(const Operand& src) {
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
- // Make mask 0x8000000000000001 and test that both bits are zero.
+ // Test that both bits of the mask 0x8000000000000001 are zero.
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
@@ -1002,6 +1217,95 @@ void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
}
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+ Label* on_invalid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+ Label* on_invalid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register src,
+ Label* on_smi,
+ Label::Distance near_jump) {
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src,
+ Label* on_not_smi,
+ Label::Distance near_jump) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessNonNegativeSmi(
+ Register src, Label* on_not_smi_or_negative,
+ Label::Distance near_jump) {
+ Condition non_negative_smi = CheckNonNegativeSmi(src);
+ j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ Label* on_equals,
+ Label::Distance near_jump) {
+ SmiCompare(src, constant);
+ j(equal, on_equals, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+ Register src2,
+ Label* on_not_both_smi,
+ Label::Distance near_jump) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
+ Register src2,
+ Label* on_not_both_smi,
+ Label::Distance near_jump) {
+ Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+
+ JumpIfNotSmi(src, on_not_smi_result, near_jump);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ LoadSmiConstant(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result, near_jump);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1058,6 +1362,30 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
}
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1082,19 +1410,148 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
}
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test for non-negativity before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result, near_jump);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test for non-negativity before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result, near_jump);
+ LoadSmiConstant(dst, constant);
+ // Adding and subtracting the min-value gives the same result; it only
+ // differs in the overflow bit, which we don't check here.
+ addq(dst, src);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+ }
+}
+
+
+void MacroAssembler::SmiNeg(Register dst,
+ Register src,
+ Label* on_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result, near_jump);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result, near_jump);
+ }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ const Operand& src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ movq(dst, kScratchRegister);
+ } else {
+ ASSERT(!src2.AddressUsesRegister(dst));
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible.
+ if (!dst.is(src1)) {
+ if (emit_debug_code()) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ Check(no_overflow, "Smi addition overflow");
+ }
+ lea(dst, Operand(src1, src2, times_1, 0));
+ } else {
+ addq(dst, src2);
+ Assert(no_overflow, "Smi addition overflow");
+ }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
- addq(dst, src2);
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ subq(dst, src2);
} else {
movq(dst, src1);
- addq(dst, src2);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
}
- Assert(no_overflow, "Smi addition overflow");
}
@@ -1102,13 +1559,30 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
ASSERT(!dst.is(src2));
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ subq(dst, src2);
+ Assert(no_overflow, "Smi subtraction overflow");
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
if (dst.is(src1)) {
- subq(dst, src2);
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
+ j(overflow, on_not_smi_result, near_jump);
+ subq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
}
- Assert(no_overflow, "Smi subtraction overflow");
}
@@ -1117,16 +1591,188 @@ void MacroAssembler::SmiSub(Register dst,
const Operand& src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
- if (dst.is(src1)) {
- subq(dst, src2);
- } else {
+ if (!dst.is(src1)) {
movq(dst, src1);
- subq(dst, src2);
}
+ subq(dst, src2);
Assert(no_overflow, "Smi subtraction overflow");
}
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+
+ if (dst.is(src1)) {
+ Label failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure, Label::kNear);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result, Label::kNear);
+
+ movq(dst, kScratchRegister);
+ xor_(dst, src2);
+ // Result was positive zero.
+ j(positive, &zero_correct_result, Label::kNear);
+
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+
+ bind(&zero_correct_result);
+ Set(dst, 0);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result, near_jump);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result, Label::kNear);
+ // One of src1 and src2 is zero; check whether the other is
+ // negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result, near_jump);
+ bind(&correct_result);
+ }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ testq(src2, src2);
+ j(zero, on_not_smi_result, near_jump);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+ // We combine this with negative zero test (negative zero only happens
+ // when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ Label safe_div;
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div, Label::kNear);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div, Label::kNear);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+ } else {
+ j(negative, on_not_smi_result, near_jump);
+ }
+ bind(&safe_div);
+
+ SmiToInteger32(src2, src2);
+ // Sign extend src1 into edx:eax.
+ cdq();
+ idivl(src2);
+ Integer32ToSmi(src2, src2);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ if (src1.is(rax)) {
+ Label smi_result;
+ j(zero, &smi_result, Label::kNear);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result, near_jump);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result, near_jump);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
+
+ testq(src2, src2);
+ j(zero, on_not_smi_result, near_jump);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ Label safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div, Label::kNear);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div, Label::kNear);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&safe_div);
+
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ Label smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result, Label::kNear);
+ testq(src1, src1);
+ j(negative, on_not_smi_result, near_jump);
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
+}
+
+
void MacroAssembler::SmiNot(Register dst, Register src) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src.is(kScratchRegister));
@@ -1166,6 +1812,7 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
+ ASSERT(!src1.is(src2));
movq(dst, src1);
}
or_(dst, src2);
@@ -1186,6 +1833,7 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
+ ASSERT(!src1.is(src2));
movq(dst, src1);
}
xor_(dst, src2);
@@ -1231,11 +1879,28 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
}
+void MacroAssembler::SmiShiftLogicalRightConstant(
+ Register dst, Register src, int shift_value,
+ Label* on_not_smi_result, Label::Distance near_jump) {
+ // Logical right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movq(dst, src);
+ if (shift_value == 0) {
+ testq(dst, dst);
+ j(negative, on_not_smi_result, near_jump);
+ }
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ }
+}
+
+
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2) {
ASSERT(!dst.is(rcx));
- NearLabel result_ok;
// Untag shift amount.
if (!dst.is(src1)) {
movq(dst, src1);
@@ -1247,6 +1912,45 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ // dst and src1 can be the same, because the one case that bails out
+ // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ orl(rcx, Immediate(kSmiShift));
+ shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
+ shl(dst, Immediate(kSmiShift));
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ Label positive_result;
+ j(positive, &positive_result, Label::kNear);
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&positive_result);
+ } else {
+ // src2 was zero and src1 negative.
+ j(negative, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
@@ -1274,6 +1978,45 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis,
+ Label::Distance near_jump) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // The operands must not both be smis.
+#ifdef DEBUG
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ // If non-zero, then neither src1 nor src2 is a smi.
+ j(not_zero, on_not_smis, near_jump);
+
+ // Exactly one operand is a smi.
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ // kScratchRegister still holds src1 & kSmiTagMask, which is zero or one.
+ subq(kScratchRegister, Immediate(1));
+ // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1309,6 +2052,104 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
}
+void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
+ ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
+void MacroAssembler::JumpIfNotString(Register object,
+ Register object_map,
+ Label* not_string,
+ Label::Distance near_jump) {
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, not_string, near_jump);
+ CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+ j(above_equal, not_string, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
+ Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail,
+ Label::Distance near_jump) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail, near_jump);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail, near_jump);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ Label* failure,
+ Label::Distance near_jump) {
+ if (!scratch.is(instance_type)) {
+ movl(scratch, instance_type);
+ }
+
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+ andl(scratch, Immediate(kFlatAsciiStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ j(not_equal, failure, near_jump);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail,
+ Label::Distance near_jump) {
+ // Load instance type for both strings.
+ movq(scratch1, first_object_instance_type);
+ movq(scratch2, second_object_instance_type);
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail, near_jump);
+}
+
+
+
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
movq(dst, src);
@@ -1339,7 +2180,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
- SmiCompare(dst, Smi::cast(*source));
+ Cmp(dst, Smi::cast(*source));
} else {
Move(kScratchRegister, source);
cmpq(dst, kScratchRegister);
@@ -1349,7 +2190,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
- SmiCompare(dst, Smi::cast(*source));
+ Cmp(dst, Smi::cast(*source));
} else {
ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1393,7 +2234,7 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
void MacroAssembler::Jump(ExternalReference ext) {
- movq(kScratchRegister, ext);
+ LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
}
@@ -1410,21 +2251,48 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
+int MacroAssembler::CallSize(ExternalReference ext) {
+ // Opcode for call kScratchRegister is: Rex.B FF D2 (three bytes).
+ const int kCallInstructionSize = 3;
+ return LoadAddressSize(ext) + kCallInstructionSize;
+}
+
+
void MacroAssembler::Call(ExternalReference ext) {
- movq(kScratchRegister, ext);
+#ifdef DEBUG
+ int end_position = pc_offset() + CallSize(ext);
+#endif
+ LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
+#ifdef DEBUG
+ CHECK_EQ(end_position, pc_offset());
+#endif
}
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+#ifdef DEBUG
+ int end_position = pc_offset() + CallSize(destination, rmode);
+#endif
movq(kScratchRegister, destination, rmode);
call(kScratchRegister);
+#ifdef DEBUG
+ CHECK_EQ(pc_offset(), end_position);
+#endif
}
-void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+#ifdef DEBUG
+ int end_position = pc_offset() + CallSize(code_object);
+#endif
ASSERT(RelocInfo::IsCodeTarget(rmode));
- call(code_object, rmode);
+ call(code_object, rmode, ast_id);
+#ifdef DEBUG
+ CHECK_EQ(end_position, pc_offset());
+#endif
}
@@ -1440,10 +2308,10 @@ void MacroAssembler::Pushad() {
push(r9);
// r10 is kScratchRegister.
push(r11);
- push(r12);
+ // r12 is kSmiConstantRegister.
// r13 is kRootRegister.
push(r14);
- // r15 is kSmiConstantRegister
+ push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
@@ -1457,8 +2325,8 @@ void MacroAssembler::Popad() {
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
lea(rsp, Operand(rsp, sp_delta));
+ pop(r15);
pop(r14);
- pop(r12);
pop(r11);
pop(r9);
pop(r8);
@@ -1477,7 +2345,7 @@ void MacroAssembler::Dropad() {
// Order in which general registers are pushed by Pushad:
-// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
0,
1,
@@ -1491,10 +2359,10 @@ int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
7,
-1,
8,
- 9,
-1,
- 10,
- -1
+ -1,
+ 9,
+ 10
};
@@ -1544,18 +2412,20 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
push(Immediate(0)); // NULL frame pointer.
}
// Save the current handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- push(Operand(kScratchRegister, 0));
+ Operand handler_operand =
+ ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
+ push(handler_operand);
// Link this handler.
- movq(Operand(kScratchRegister, 0), rsp);
+ movq(handler_operand, rsp);
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
// Unlink this handler.
- movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- pop(Operand(kScratchRegister, 0));
+ Operand handler_operand =
+ ExternalOperand(ExternalReference(Isolate::k_handler_address, isolate()));
+ pop(handler_operand);
// Remove the remaining fields.
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -1573,21 +2443,20 @@ void MacroAssembler::Throw(Register value) {
movq(rax, value);
}
- ExternalReference handler_address(Top::k_handler_address);
- movq(kScratchRegister, handler_address);
- movq(rsp, Operand(kScratchRegister, 0));
+ ExternalReference handler_address(Isolate::k_handler_address, isolate());
+ Operand handler_operand = ExternalOperand(handler_address);
+ movq(rsp, handler_operand);
// get next in chain
- pop(rcx);
- movq(Operand(kScratchRegister, 0), rcx);
+ pop(handler_operand);
pop(rbp); // pop frame pointer
pop(rdx); // remove state
// Before returning we restore the context from the frame pointer if not NULL.
// The frame pointer is NULL in the exception handler of a JS entry frame.
Set(rsi, 0); // Tentatively set context pointer to NULL
- NearLabel skip;
+ Label skip;
cmpq(rbp, Immediate(0));
- j(equal, &skip);
+ j(equal, &skip, Label::kNear);
movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
bind(&skip);
ret(0);
@@ -1601,17 +2470,16 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
movq(rax, value);
}
// Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- movq(kScratchRegister, handler_address);
- movq(rsp, Operand(kScratchRegister, 0));
+ ExternalReference handler_address(Isolate::k_handler_address, isolate());
+ Load(rsp, handler_address);
// Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
+ Label loop, done;
bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done);
+ j(equal, &done, Label::kNear);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
movq(rsp, Operand(rsp, kNextOffset));
@@ -1619,19 +2487,21 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- movq(kScratchRegister, handler_address);
- pop(Operand(kScratchRegister, 0));
+ Operand handler_operand = ExternalOperand(handler_address);
+ pop(handler_operand);
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- movq(rax, Immediate(false));
- store_rax(external_caught);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
+ Set(rax, static_cast<int64_t>(false));
+ Store(external_caught, rax);
// Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
+ ExternalReference pending_exception(Isolate::k_pending_exception_address,
+ isolate());
movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- store_rax(pending_exception);
+ Store(pending_exception, rax);
}
// Clear the context pointer.
@@ -1639,14 +2509,14 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
// Restore registers from handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
+ StackHandlerConstants::kFPOffset);
pop(rbp); // FP
STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
+ StackHandlerConstants::kStateOffset);
pop(rdx); // State
STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
+ StackHandlerConstants::kPCOffset);
ret(0);
}
@@ -1688,11 +2558,21 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastElementValue));
+ j(above, fail, distance);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- bool is_heap_object) {
- if (!is_heap_object) {
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
@@ -1700,26 +2580,87 @@ void MacroAssembler::CheckMap(Register obj,
}
+void MacroAssembler::ClampUint8(Register reg) {
+ Label done;
+ testl(reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ setcc(negative, reg); // 1 if negative, 0 if positive.
+ decb(reg); // 0 if negative, 255 if positive.
+ bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+ XMMRegister temp_xmm_reg,
+ Register result_reg,
+ Register temp_reg) {
+ Label done;
+ Set(result_reg, 0);
+ xorps(temp_xmm_reg, temp_xmm_reg);
+ ucomisd(input_reg, temp_xmm_reg);
+ j(below, &done, Label::kNear);
+ uint64_t one_half = BitCast<uint64_t, double>(0.5);
+ Set(temp_reg, one_half);
+ movq(temp_xmm_reg, temp_reg);
+ addsd(temp_xmm_reg, input_reg);
+ cvttsd2si(result_reg, temp_xmm_reg);
+ testl(result_reg, Immediate(0xFFFFFF00));
+ j(zero, &done, Label::kNear);
+ Set(result_reg, 255);
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ movq(descriptors, FieldOperand(map,
+ Map::kInstanceDescriptorsOrBitField3Offset));
+ Label not_smi;
+ JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+ Move(descriptors, isolate()->factory()->empty_descriptor_array());
+ bind(&not_smi);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ j(equal, success, RelocInfo::CODE_TARGET);
+
+ bind(&fail);
+}
+
+
void MacroAssembler::AbortIfNotNumber(Register object) {
- NearLabel ok;
+ Label ok;
Condition is_smi = CheckSmi(object);
- j(is_smi, &ok);
+ j(is_smi, &ok, Label::kNear);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
+ isolate()->factory()->heap_number_map());
Assert(equal, "Operand not a number");
bind(&ok);
}
void MacroAssembler::AbortIfSmi(Register object) {
- NearLabel ok;
Condition is_smi = CheckSmi(object);
Assert(NegateCondition(is_smi), "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
- NearLabel ok;
+ Condition is_smi = CheckSmi(object);
+ Assert(is_smi, "Operand is not a smi");
+}
+
+
+void MacroAssembler::AbortIfNotSmi(const Operand& object) {
Condition is_smi = CheckSmi(object);
Assert(is_smi, "Operand is not a smi");
}
@@ -1770,10 +2711,10 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(not_equal, miss);
// Make sure that the function has an instance prototype.
- NearLabel non_instance;
+ Label non_instance;
testb(FieldOperand(result, Map::kBitFieldOffset),
Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
+ j(not_zero, &non_instance, Label::kNear);
// Get the prototype or initial map from the function.
movq(result,
@@ -1786,13 +2727,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
j(equal, miss);
// If the function does not have an initial map, we're done.
- NearLabel done;
+ Label done;
CmpObjectType(result, MAP_TYPE, kScratchRegister);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
movq(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
+ jmp(&done, Label::kNear);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
@@ -1806,8 +2747,8 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
- movq(kScratchRegister, ExternalReference(counter));
- movl(Operand(kScratchRegister, 0), Immediate(value));
+ Operand counter_operand = ExternalOperand(ExternalReference(counter));
+ movl(counter_operand, Immediate(value));
}
}
@@ -1815,12 +2756,11 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
ASSERT(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- movq(kScratchRegister, ExternalReference(counter));
- Operand operand(kScratchRegister, 0);
+ Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
- incl(operand);
+ incl(counter_operand);
} else {
- addl(operand, Immediate(value));
+ addl(counter_operand, Immediate(value));
}
}
}
@@ -1829,12 +2769,11 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
ASSERT(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- movq(kScratchRegister, ExternalReference(counter));
- Operand operand(kScratchRegister, 0);
+ Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
- decl(operand);
+ decl(counter_operand);
} else {
- subl(operand, Immediate(value));
+ subl(counter_operand, Immediate(value));
}
}
}
@@ -1844,31 +2783,51 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
Set(rax, 0); // No arguments.
- movq(rbx, ExternalReference(Runtime::kDebugBreak));
+ LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+ // This macro takes the dst register to make the code more readable
+ // at the call sites. However, the dst register has to be rcx to
+ // follow the calling convention which requires the call kind to be
+ // in rcx.
+ ASSERT(dst.is(rcx));
+ if (call_kind == CALL_AS_FUNCTION) {
+ LoadSmiConstant(dst, Smi::FromInt(1));
+ } else {
+ LoadSmiConstant(dst, Smi::FromInt(0));
+ }
+}
+
+
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- NearLabel done;
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Label done;
InvokePrologue(expected,
actual,
Handle<Code>::null(),
code,
&done,
flag,
- post_call_generator);
+ Label::kNear,
+ call_wrapper,
+ call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
jmp(code);
}
bind(&done);
@@ -1880,8 +2839,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- NearLabel done;
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Label done;
Register dummy = rax;
InvokePrologue(expected,
actual,
@@ -1889,12 +2849,17 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
dummy,
&done,
flag,
- post_call_generator);
+ Label::kNear,
+ call_wrapper,
+ call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
Call(code, rmode);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
Jump(code, rmode);
}
bind(&done);
@@ -1904,7 +2869,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -1915,14 +2881,15 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, post_call_generator);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@@ -1933,7 +2900,7 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// the Code object every time we call the function.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, post_call_generator);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
} else {
// Invoke the cached code.
Handle<Code> code(function->code());
@@ -1943,7 +2910,79 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
actual,
RelocInfo::CODE_TARGET,
flag,
- post_call_generator);
+ call_wrapper,
+ call_kind);
+ }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_register,
+ Label* done,
+ InvokeFlag flag,
+ Label::Distance near_jump,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ bool definitely_matches = false;
+ Label invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ Set(rax, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for built-ins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ Set(rbx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmpq(expected.reg(), Immediate(actual.immediate()));
+ j(equal, &invoke, Label::kNear);
+ ASSERT(expected.reg().is(rbx));
+ Set(rax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmpq(expected.reg(), actual.reg());
+ j(equal, &invoke, Label::kNear);
+ ASSERT(actual.reg().is(rax));
+ ASSERT(expected.reg().is(rbx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (!code_constant.is_null()) {
+ movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_register.is(rdx)) {
+ movq(rdx, code_register);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ SetCallKind(rcx, call_kind);
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ call_wrapper.AfterCall();
+ jmp(done, near_jump);
+ } else {
+ SetCallKind(rcx, call_kind);
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
}
}
@@ -1955,9 +2994,9 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Push(Smi::FromInt(type));
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
movq(kScratchRegister,
- Factory::undefined_value(),
+ isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
Check(not_equal, "code object not properly patched");
@@ -1966,7 +3005,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, "stack frame types must match");
@@ -1992,16 +3031,12 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
- ExternalReference context_address(Top::k_context_address);
if (save_rax) {
- movq(r14, rax); // Backup rax before we use it.
+ movq(r14, rax); // Back up rax in a callee-save register.
}
- movq(rax, rbp);
- store_rax(c_entry_fp_address);
- movq(rax, rsi);
- store_rax(context_address);
+ Store(ExternalReference(Isolate::k_c_entry_fp_address, isolate()), rbp);
+ Store(ExternalReference(Isolate::k_context_address, isolate()), rsi);
}
@@ -2013,7 +3048,6 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
@@ -2027,11 +3061,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
}
// Get the required frame alignment for the OS.
- static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
- movq(kScratchRegister, Immediate(-kFrameAlignment));
- and_(rsp, kScratchRegister);
+ ASSERT(is_int8(kFrameAlignment));
+ and_(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
@@ -2042,10 +3076,10 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
- // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
+ // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r12, Operand(rbp, r14, times_pointer_size, offset));
+ lea(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -2059,7 +3093,7 @@ void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Registers:
- // r12 : argv
+ // r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
@@ -2073,7 +3107,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r12, 1 * kPointerSize));
+ lea(rsp, Operand(r15, 1 * kPointerSize));
// Push the return address to get ready to return.
push(rcx);
@@ -2092,17 +3126,18 @@ void MacroAssembler::LeaveApiExitFrame() {
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Top::k_context_address);
- movq(kScratchRegister, context_address);
- movq(rsi, Operand(kScratchRegister, 0));
+ ExternalReference context_address(Isolate::k_context_address, isolate());
+ Operand context_operand = ExternalOperand(context_address);
+ movq(rsi, context_operand);
#ifdef DEBUG
- movq(Operand(kScratchRegister, 0), Immediate(0));
+ movq(context_operand, Immediate(0));
#endif
// Clear the top frame.
- ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
- movq(kScratchRegister, c_entry_fp_address);
- movq(Operand(kScratchRegister, 0), Immediate(0));
+ ExternalReference c_entry_fp_address(Isolate::k_c_entry_fp_address,
+ isolate());
+ Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
+ movq(c_entry_fp_operand, Immediate(0));
}
@@ -2117,7 +3152,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmpq(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
@@ -2127,9 +3162,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- Factory::global_context_map());
+ isolate()->factory()->global_context_map());
Check(equal, "JSGlobalObject::global_context should be a global context.");
}
@@ -2143,7 +3178,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// object.
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Preserve original value of holder_reg.
push(holder_reg);
movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
@@ -2173,7 +3208,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
@@ -2181,8 +3216,8 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
ASSERT(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
- movq(kScratchRegister, new_space_allocation_top);
- cmpq(result, Operand(kScratchRegister, 0));
+ Operand top_operand = ExternalOperand(new_space_allocation_top);
+ cmpq(result, top_operand);
Check(equal, "Unexpected allocation top");
#endif
return;
@@ -2191,39 +3226,30 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// Move address of new object to result. Use scratch register if available,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
- movq(scratch, new_space_allocation_top);
+ LoadAddress(scratch, new_space_allocation_top);
movq(result, Operand(scratch, 0));
- } else if (result.is(rax)) {
- load_rax(new_space_allocation_top);
} else {
- movq(kScratchRegister, new_space_allocation_top);
- movq(result, Operand(kScratchRegister, 0));
+ Load(result, new_space_allocation_top);
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
testq(result_end, Immediate(kObjectAlignmentMask));
Check(zero, "Unaligned allocation in new space");
}
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Update new top.
- if (result_end.is(rax)) {
- // rax can be stored directly to a memory location.
- store_rax(new_space_allocation_top);
+ if (scratch.is_valid()) {
+ // Scratch already contains address of allocation top.
+ movq(Operand(scratch, 0), result_end);
} else {
- // Register required - use scratch provided if available.
- if (scratch.is_valid()) {
- movq(Operand(scratch, 0), result_end);
- } else {
- movq(kScratchRegister, new_space_allocation_top);
- movq(Operand(kScratchRegister, 0), result_end);
- }
+ Store(new_space_allocation_top, result_end);
}
}
@@ -2235,7 +3261,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
if (result_end.is_valid()) {
@@ -2255,7 +3281,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
Register top_reg = result_end.is_valid() ? result_end : result;
@@ -2264,8 +3290,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
}
addq(top_reg, Immediate(object_size));
j(carry, gc_required);
- movq(kScratchRegister, new_space_allocation_limit);
- cmpq(top_reg, Operand(kScratchRegister, 0));
+ Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ cmpq(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -2293,7 +3319,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
movl(result_end, Immediate(0x7191));
@@ -2312,15 +3338,15 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
// We assume that element_count*element_size + header_size does not
// overflow.
lea(result_end, Operand(element_count, element_size, header_size));
addq(result_end, result);
j(carry, gc_required);
- movq(kScratchRegister, new_space_allocation_limit);
- cmpq(result_end, Operand(kScratchRegister, 0));
+ Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ cmpq(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -2340,7 +3366,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
movl(result_end, Immediate(0x7191));
@@ -2359,14 +3385,14 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
if (!object_size.is(result_end)) {
movq(result_end, object_size);
}
addq(result_end, result);
j(carry, gc_required);
- movq(kScratchRegister, new_space_allocation_limit);
- cmpq(result_end, Operand(kScratchRegister, 0));
+ Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ cmpq(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -2381,16 +3407,16 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(object, Immediate(~kHeapObjectTagMask));
- movq(kScratchRegister, new_space_allocation_top);
+ Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, Operand(kScratchRegister, 0));
+ cmpq(object, top_operand);
Check(below, "Undo allocation of non allocated memory");
#endif
- movq(Operand(kScratchRegister, 0), object);
+ movq(top_operand, object);
}
@@ -2524,18 +3550,77 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
}
+// Copy memory, byte-by-byte, from source to destination. Not optimized for
+// long or aligned copies. Destination is incremented by length; the contents
+// of source, length and scratch are clobbered.
+// A simpler loop is faster on small copies, but slower on large ones.
+// The cld() instruction must have been emitted, to clear the direction flag,
+// before calling this function.
+void MacroAssembler::CopyBytes(Register destination,
+ Register source,
+ Register length,
+ int min_length,
+ Register scratch) {
+ ASSERT(min_length >= 0);
+ if (FLAG_debug_code) {
+ cmpl(length, Immediate(min_length));
+ Assert(greater_equal, "Invalid min_length");
+ }
+ Label loop, done, short_string, short_loop;
+
+ const int kLongStringLimit = 20;
+ if (min_length <= kLongStringLimit) {
+ cmpl(length, Immediate(kLongStringLimit));
+ j(less_equal, &short_string);
+ }
+
+ ASSERT(source.is(rsi));
+ ASSERT(destination.is(rdi));
+ ASSERT(length.is(rcx));
+
+ // Because source is 8-byte aligned in our uses of this function,
+ // we keep source aligned for the rep movs operation by copying the odd bytes
+ // at the end of the ranges.
+ movq(scratch, length);
+ shrl(length, Immediate(3));
+ repmovsq();
+ // Move remaining bytes of length.
+ andl(scratch, Immediate(0x7));
+ movq(length, Operand(source, scratch, times_1, -8));
+ movq(Operand(destination, scratch, times_1, -8), length);
+ addq(destination, scratch);
+
+ if (min_length <= kLongStringLimit) {
+ jmp(&done);
+
+ bind(&short_string);
+ if (min_length == 0) {
+ testl(length, length);
+ j(zero, &done);
+ }
+ lea(scratch, Operand(destination, length, times_1, 0));
+
+ bind(&short_loop);
+ movb(length, Operand(source, 0));
+ movb(Operand(destination, 0), length);
+ incq(source);
+ incq(destination);
+ cmpq(destination, scratch);
+ j(not_equal, &short_loop);
+
+ bind(&done);
+ }
+}
+
+
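As an aside for readers of this hunk: the ASSERTs above pin the operands to the rep movs registers, so a call site reads roughly like the sketch below (not part of the patch; `masm` is assumed to be a MacroAssembler* with rsi/rdi/rcx already loaded):

    masm->cld();                     // rep movs must run with the direction flag clear.
    masm->CopyBytes(rdi, rsi, rcx);  // destination, source, length; min_length defaults to 0.
    // On return rdi points just past the copied bytes; rsi, rcx and the
    // scratch register (kScratchRegister by default) are clobbered.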
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
- // The context may be an intermediate context, not a function context.
- movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
@@ -2543,17 +3628,25 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
movq(dst, rsi);
}
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (FLAG_debug_code) {
- cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
+ // We should not have found a with or catch context by walking the context
+ // chain (i.e., the static scope chain and runtime context chain do not
+ // agree). A variable occurring in such a scope should have slot type
+ // LOOKUP and not CONTEXT.
+ if (emit_debug_code()) {
+ CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
+ Heap::kWithContextMapRootIndex);
+ Check(not_equal, "Variable resolved to with context.");
+ CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
+ Heap::kCatchContextMapRootIndex);
+ Check(not_equal, "Variable resolved to catch context.");
}
}
+#ifdef _WIN64
+static const int kRegisterPassedArguments = 4;
+#else
+static const int kRegisterPassedArguments = 6;
+#endif
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
@@ -2569,9 +3662,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
- CheckMap(map, Factory::meta_map(), &fail, false);
+ CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
Abort("Global functions must have initial map");
@@ -2589,11 +3682,10 @@ int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// and the caller does not reserve stack slots for them.
ASSERT(num_arguments >= 0);
#ifdef _WIN64
- static const int kMinimumStackSlots = 4;
+ const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
return num_arguments;
#else
- static const int kRegisterPassedArguments = 6;
if (num_arguments < kRegisterPassedArguments) return 0;
return num_arguments - kRegisterPassedArguments;
#endif
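For context, the C-call sequence these stack-slot helpers support looks like the following sketch (illustrative only; `my_function_ref` is a placeholder ExternalReference, not a real V8 name):

    static const int kNumArguments = 2;
    masm->PrepareCallCFunction(kNumArguments);   // aligns rsp and reserves argument slots
    // Load the arguments into the platform's parameter registers
    // (rcx, rdx on Win64; rdi, rsi in the AMD64 ABI) before the call.
    masm->CallCFunction(my_function_ref, kNumArguments);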
@@ -2604,6 +3696,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
ASSERT(num_arguments >= 0);
+
// Make stack end at alignment and allocate space for arguments and old rsp.
movq(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frame_alignment));
@@ -2617,14 +3710,14 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- movq(rax, function);
+ LoadAddress(rax, function);
CallCFunction(rax, num_arguments);
}
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
// Check stack alignment.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
CheckStackAlignment();
}
@@ -2638,7 +3731,9 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
CodePatcher::CodePatcher(byte* address, int size)
- : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+ : address_(address),
+ size_(size),
+ masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
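A sketch of typical CodePatcher usage under the new constructor (hedged: it assumes the usual masm() accessor and that the destructor still verifies the emitted size and flushes the instruction cache):

    // Replace the first two bytes at 'address' with int3 breakpoints.
    CodePatcher patcher(address, 2);
    patcher.masm()->int3();
    patcher.masm()->int3();
    // ~CodePatcher checks that exactly 'size' bytes were emitted.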
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 9557940a9a..f09fafc202 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "assembler.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -44,21 +45,24 @@ enum AllocationFlags {
RESULT_CONTAINS_TOP = 1 << 1
};
+
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = { 10 }; // r10.
-static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
+static const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
static const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negitive 8-bit displacement values.
+static const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
-class PostCallGenerator;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
@@ -71,13 +75,64 @@ struct SmiIndex {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
-
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke any such function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+ // Prevent the use of the RootArray during the lifetime of this
+ // scope object.
+ class NoRootArrayScope BASE_EMBEDDED {
+ public:
+ explicit NoRootArrayScope(MacroAssembler* assembler)
+ : variable_(&assembler->root_array_available_),
+ old_value_(assembler->root_array_available_) {
+ assembler->root_array_available_ = false;
+ }
+ ~NoRootArrayScope() {
+ *variable_ = old_value_;
+ }
+ private:
+ bool* variable_;
+ bool old_value_;
+ };
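The regexp assembler further down in this diff holds one of these as a member (no_root_array_scope_); as a local scope it reads like this sketch, with kBufferSize standing in for an arbitrary buffer size:

    MacroAssembler masm(isolate, NULL, kBufferSize);
    {
      MacroAssembler::NoRootArrayScope no_root(&masm);
      // Code emitted here must not use root-array-based macros, e.g. code
      // that runs before InitializeRootRegister() has set up r13.
    }
    // Root-array access is allowed again once the scope is destroyed.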
+
+ // Operand pointing to an external reference.
+ // May emit code to set up the scratch register. The operand is
+ // only guaranteed to be correct as long as the scratch register
+ // isn't changed.
+ // If the operand is used more than once, use a scratch register
+ // that is guaranteed not to be clobbered.
+ Operand ExternalOperand(ExternalReference reference,
+ Register scratch = kScratchRegister);
+ // Loads and stores the value of an external reference.
+ // Special case code for load and store to take advantage of
+ // load_rax/store_rax if possible/necessary.
+ // For other operations, just use:
+ // Operand operand = ExternalOperand(extref);
+ // operation(operand, ..);
+ void Load(Register destination, ExternalReference source);
+ void Store(ExternalReference destination, Register source);
+ // Loads the address of the external reference into the destination
+ // register.
+ void LoadAddress(Register destination, ExternalReference source);
+ // Returns the size of the code generated by LoadAddress.
+ // Used by CallSize(ExternalReference) to find the size of a call.
+ int LoadAddressSize(ExternalReference source);
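The allocation-limit hunks earlier in this diff are a direct instance of the pattern described above:

    ExternalReference new_space_allocation_limit =
        ExternalReference::new_space_allocation_limit_address(isolate());
    Operand limit_operand = ExternalOperand(new_space_allocation_limit);
    cmpq(top_reg, limit_operand);   // may have clobbered kScratchRegister
    j(above, gc_required);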
+
+ // Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
+ void StoreRoot(Register source, Heap::RootListIndex index);
+ // Load a root value where the index (or part of it) is variable.
+ // The variable_offset register is added to the fixed_offset value
+ // to get the index into the root-array.
+ void LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset);
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
- void StoreRoot(Register source, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// GC Support
@@ -92,11 +147,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space. The condition cc can be equal or
// not_equal. If it is equal a jump will be done if the object is on new
// space. The register scratch can be object itself, but it will be clobbered.
- template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc,
- LabelType* branch);
+ Label* branch,
+ Label::Distance near_jump = Label::kFar);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -176,40 +231,56 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void InitializeRootRegister() {
+ ExternalReference roots_address =
+ ExternalReference::roots_address(isolate());
+ movq(kRootRegister, roots_address);
+ addq(kRootRegister, Immediate(kRootRegisterBias));
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
+ // Set up the call kind marking in rcx. The method takes rcx as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ void SetCallKind(Register dst, CallKind kind);
+
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
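Call sites now pass the wrapper and call kind explicitly; a minimal sketch of the common case (no safepoint wrapper, method-style call), with the function assumed to be in rdi and `arg_count` a placeholder:

    ParameterCount actual(arg_count);
    InvokeFunction(rdi, actual, CALL_FUNCTION,
                   NullCallWrapper(), CALL_AS_METHOD);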
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ const CallWrapper& call_wrapper = NullCallWrapper());
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -262,9 +333,20 @@ class MacroAssembler: public Assembler {
Register src,
int power);
+ // Perform the logical or of two smi values and return a smi value.
+ // If either argument is not a smi, jump to on_not_smis and retain
+ // the original values of source registers. The destination register
+ // may be changed if it's not one of the source registers.
+ void SmiOrIfSmis(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis,
+ Label::Distance near_jump = Label::kFar);
- // Simple comparison of smis.
- void SmiCompare(Register dst, Register src);
+
+ // Simple comparison of smis. Both sides must be known smis to use these;
+ // otherwise use Cmp.
+ void SmiCompare(Register smi1, Register smi2);
void SmiCompare(Register dst, Smi* src);
void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
@@ -317,42 +399,45 @@ class MacroAssembler: public Assembler {
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
- template <typename LabelType>
- void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
+ void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
+ Label::Distance near_jump = Label::kFar);
// Jump if the unsigned integer value cannot be represented by a smi.
- template <typename LabelType>
- void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
+ void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value is a tagged smi.
- template <typename LabelType>
- void JumpIfSmi(Register src, LabelType* on_smi);
+ void JumpIfSmi(Register src,
+ Label* on_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value is not a tagged smi.
- template <typename LabelType>
- void JumpIfNotSmi(Register src, LabelType* on_not_smi);
+ void JumpIfNotSmi(Register src,
+ Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value is not a non-negative tagged smi.
- template <typename LabelType>
- void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);
+ void JumpUnlessNonNegativeSmi(Register src,
+ Label* on_not_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump to label if the value, which must be a tagged smi, has value equal
// to the constant.
- template <typename LabelType>
void JumpIfSmiEqualsConstant(Register src,
Smi* constant,
- LabelType* on_equals);
+ Label* on_equals,
+ Label::Distance near_jump = Label::kFar);
// Jump if either or both register are not smi values.
- template <typename LabelType>
void JumpIfNotBothSmi(Register src1,
Register src2,
- LabelType* on_not_both_smi);
+ Label* on_not_both_smi,
+ Label::Distance near_jump = Label::kFar);
// Jump if either or both register are not non-negative smi values.
- template <typename LabelType>
void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
- LabelType* on_not_both_smi);
+ Label* on_not_both_smi,
+ Label::Distance near_jump = Label::kFar);
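All of these jump helpers now accept an optional Label::Distance, so call sites that know the target is close can request the short jump encoding; a sketch, not taken from the patch:

    Label is_smi;
    JumpIfSmi(rax, &is_smi, Label::kNear);  // 8-bit displacement; the default kFar uses 32 bits
    // ... non-smi path ...
    bind(&is_smi);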
// Operations on tagged smi values.
@@ -362,11 +447,11 @@ class MacroAssembler: public Assembler {
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
- template <typename LabelType>
void SmiTryAddConstant(Register dst,
Register src,
Smi* constant,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
@@ -378,11 +463,11 @@ class MacroAssembler: public Assembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
- template <typename LabelType>
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result. No testing on the result is done. Sets the N and Z flags
@@ -391,27 +476,32 @@ class MacroAssembler: public Assembler {
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
- template <typename LabelType>
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Negating a smi can give a negative zero or too large positive value.
// NOTICE: This operation jumps on success, not failure!
- template <typename LabelType>
void SmiNeg(Register dst,
Register src,
- LabelType* on_smi_result);
+ Label* on_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Adds smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
- template <typename LabelType>
void SmiAdd(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
+ void SmiAdd(Register dst,
+ Register src1,
+ const Operand& src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiAdd(Register dst,
Register src1,
@@ -420,21 +510,21 @@ class MacroAssembler: public Assembler {
// Subtracts smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
- template <typename LabelType>
void SmiSub(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiSub(Register dst,
Register src1,
Register src2);
- template <typename LabelType>
void SmiSub(Register dst,
Register src1,
const Operand& src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiSub(Register dst,
Register src1,
@@ -444,27 +534,27 @@ class MacroAssembler: public Assembler {
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
- template <typename LabelType>
void SmiMul(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
- template <typename LabelType>
void SmiDiv(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
- template <typename LabelType>
void SmiMod(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Bitwise operations.
void SmiNot(Register dst, Register src);
@@ -478,11 +568,11 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value);
- template <typename LabelType>
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -495,11 +585,11 @@ class MacroAssembler: public Assembler {
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
- template <typename LabelType>
void SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smi_result);
+ Label* on_not_smi_result,
+ Label::Distance near_jump = Label::kFar);
// Shifts a smi value to the right, sign extending the top, and
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
@@ -513,11 +603,11 @@ class MacroAssembler: public Assembler {
// Select the non-smi register of two registers where exactly one is a
// smi. If neither are smis, jump to the failure label.
- template <typename LabelType>
void SelectNonSmi(Register dst,
Register src1,
Register src2,
- LabelType* on_not_smis);
+ Label* on_not_smis,
+ Label::Distance near_jump = Label::kFar);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
@@ -532,6 +622,10 @@ class MacroAssembler: public Assembler {
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
+ // Add the value of a smi in memory to an int32 register.
+ // Sets flags as a normal add.
+ void AddSmiField(Register dst, const Operand& src);
+
// Basic Smi operations.
void Move(Register dst, Smi* source) {
LoadSmiConstant(dst, source);
@@ -549,35 +643,36 @@ class MacroAssembler: public Assembler {
// String macros.
// If object is a string, its map is loaded into object_map.
- template <typename LabelType>
void JumpIfNotString(Register object,
Register object_map,
- LabelType* not_string);
+ Label* not_string,
+ Label::Distance near_jump = Label::kFar);
- template <typename LabelType>
- void JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- LabelType* on_not_both_flat_ascii);
+ void JumpIfNotBothSequentialAsciiStrings(
+ Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_both_flat_ascii,
+ Label::Distance near_jump = Label::kFar);
// Check whether the instance type represents a flat ascii string. Jump to the
// label if not. If the instance type can be scratched, specify the same
// register for both the instance type and the scratch register.
- template <typename LabelType>
void JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
- LabelType *on_not_flat_ascii_string);
+ Label* on_not_flat_ascii_string,
+ Label::Distance near_jump = Label::kFar);
- template <typename LabelType>
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
- LabelType* on_fail);
+ Label* on_fail,
+ Label::Distance near_jump = Label::kFar);
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -594,6 +689,8 @@ class MacroAssembler: public Assembler {
void Move(const Operand& dst, Handle<Object> source);
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
+ void Cmp(Register dst, Smi* src);
+ void Cmp(const Operand& dst, Smi* src);
void Push(Handle<Object> source);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -609,7 +706,27 @@ class MacroAssembler: public Assembler {
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
- void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code_object,
+ RelocInfo::Mode rmode,
+ unsigned ast_id = kNoASTId);
+
+ // The size of the code generated for different call instructions.
+ int CallSize(Address destination, RelocInfo::Mode rmode) {
+ return kCallInstructionLength;
+ }
+ int CallSize(ExternalReference ext);
+ int CallSize(Handle<Code> code_object) {
+ // Code calls use 32-bit relative addressing.
+ return kShortCallInstructionLength;
+ }
+ int CallSize(Register target) {
+ // Opcode: REX_opt FF /2 m64
+ return (target.high_bit() != 0) ? 3 : 2;
+ }
+ int CallSize(const Operand& target) {
+ // Opcode: REX_opt FF /2 m64
+ return (target.requires_rex() ? 2 : 1) + target.operand_size();
+ }
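These size helpers let a caller predict exactly how many bytes the matching Call will emit, which is what patchable call sequences rely on; a sketch under that assumption, with `code_object` a placeholder Handle<Code>:

    int end_offset = pc_offset() + CallSize(code_object);
    Call(code_object, RelocInfo::CODE_TARGET);
    ASSERT_EQ(end_offset, pc_offset());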
// Emit call to the code we are currently generating.
void CallSelf() {
@@ -637,13 +754,27 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
// Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required
// (object is known to be a heap object)
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- bool is_heap_object);
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
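The LoadGlobalFunctionInitialMap hunk earlier in this diff shows the new CheckMap signature in use:

    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);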
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
@@ -659,6 +790,15 @@ class MacroAssembler: public Assembler {
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
+ void ClampUint8(Register reg);
+
+ void ClampDoubleToUint8(XMMRegister input_reg,
+ XMMRegister temp_xmm_reg,
+ Register result_reg,
+ Register temp_reg);
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
@@ -667,6 +807,7 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+ void AbortIfNotSmi(const Operand& object);
// Abort execution if argument is a string. Used in debug code.
void AbortIfNotString(Register object);
@@ -830,7 +971,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub);
+ void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
// Call a code stub and return the code object called. Try to generate
// the code if necessary. Do not perform a GC but instead return a retry
@@ -849,7 +990,7 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -857,7 +998,7 @@ class MacroAssembler: public Assembler {
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::Function* f,
+ MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@@ -906,7 +1047,7 @@ class MacroAssembler: public Assembler {
// Calls an API function. Allocates HandleScope, extracts
// returned value from handle and propagates exceptions.
- // Clobbers r12, r14, rbx and caller-save registers. Restores context.
+ // Clobbers r14, r15, rbx and caller-save registers. Restores context.
// On return removes stack_space * kPointerSize (GCed).
MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
ApiFunction* function, int stack_space);
@@ -941,7 +1082,22 @@ class MacroAssembler: public Assembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- Handle<Object> CodeObject() { return code_object_; }
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
+
+ // Copy length bytes from source to destination.
+ // Uses the scratch register internally (if you have one of the low eight
+ // registers free, pass it; otherwise kScratchRegister will be used).
+ // min_length is a lower bound on the value that length will have.
+ // The algorithm has some special cases that might be omitted if the string
+ // is known to always be long.
+ void CopyBytes(Register destination,
+ Register source,
+ Register length,
+ int min_length = 0,
+ Register scratch = kScratchRegister);
// ---------------------------------------------------------------------------
@@ -982,12 +1138,14 @@ class MacroAssembler: public Assembler {
private:
// Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
static int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 11;
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
bool allow_stub_calls_;
+ bool root_array_available_;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
@@ -1000,14 +1158,15 @@ class MacroAssembler: public Assembler {
Handle<Object> code_object_;
// Helper functions for generating invokes.
- template <typename LabelType>
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_register,
- LabelType* done,
+ Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator);
+ Label::Distance near_jump = Label::kFar,
+ const CallWrapper& call_wrapper = NullCallWrapper(),
+ CallKind call_kind = CALL_AS_METHOD);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1072,17 +1231,6 @@ class CodePatcher {
};
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1144,713 +1292,6 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-// -----------------------------------------------------------------------------
-// Template implementations.
-
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-
-template <typename LabelType>
-void MacroAssembler::SmiNeg(Register dst,
- Register src,
- LabelType* on_smi_result) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- LabelType* on_not_smi_result) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- NearLabel failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- NearLabel correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- j(positive, &zero_correct_result); // Result was positive zero.
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
-
- bind(&zero_correct_result);
- Set(dst, 0);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- NearLabel correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result);
- // One of src1 and src2 is zero, the check whether the other is
- // negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result);
- bind(&correct_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- ASSERT_EQ(0, kSmiTag);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- LabelType* on_not_smi_result) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result);
- movq(dst, kScratchRegister);
- }
- } else {
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result);
- }
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- NearLabel positive_divisor;
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
- // We combine this with negative zero test (negative zero only happens
- // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- NearLabel safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- } else {
- j(negative, on_not_smi_result);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
- // Sign extend src1 into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- NearLabel smi_result;
- j(zero, &smi_result);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&smi_result);
- } else {
- j(not_zero, on_not_smi_result);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- Integer32ToSmi(dst, rax);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMod(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- NearLabel safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- NearLabel smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result);
- testq(src1, src1);
- j(negative, on_not_smi_result);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRightConstant(
- Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
- // Logic right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smi_result) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
- NearLabel result_ok;
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- NearLabel positive_result;
- j(positive, &positive_result);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result);
- bind(&positive_result);
- } else {
- j(negative, on_not_smi_result); // src2 was zero and src1 negative.
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- LabelType* on_not_smis) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
- // Both operands must not be smis.
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
- // If non-zero then both are smis.
- j(not_zero, on_not_smis);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
- // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
- // If src1 is a smi, then scratch register all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- Condition smi = CheckSmi(src);
- j(smi, on_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessNonNegativeSmi(
- Register src, LabelType* on_not_smi_or_negative) {
- Condition non_negative_smi = CheckNonNegativeSmi(src);
- j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- LabelType* on_equals) {
- SmiCompare(src, constant);
- j(equal, on_equals);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
- LabelType* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- LabelType* on_invalid) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
- Register src2,
- LabelType* on_not_both_smi) {
- Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotString(Register object,
- Register object_map,
- LabelType* not_string) {
- Condition is_smi = CheckSmi(object);
- j(is_smi, not_string);
- CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
- j(above_equal, not_string);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- LabelType *failure) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
- j(not_equal, failure);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- LabelType* on_fail) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ascii strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- LabelType* branch) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask());
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask());
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(scratch, kScratchRegister);
- j(cc, branch);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
- j(cc, branch);
- }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- LabelType* done,
- InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
- bool definitely_matches = false;
- NearLabel invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
- // don't want that done. Skip adaption code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
- jmp(done);
- } else {
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
} } // namespace v8::internal
#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index cd3bfbd42c..2ea17f0e96 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -63,11 +63,16 @@ namespace internal {
*
* The registers rax, rbx, r9 and r11 are free to use for computations.
* If changed to use r12+, they should be saved as callee-save registers.
+ * The macro assembler special registers r12 and r13 (kSmiConstantRegister,
+ * kRootRegister) aren't special during execution of RegExp code (they don't
+ * hold the values assumed when creating JS code), so no Smi or Root related
+ * macro operations can be used.
*
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
+ * - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
@@ -104,12 +109,13 @@ namespace internal {
* bool direct_call)
*/
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM((&masm_))
RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
+ no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4),
mode_(mode),
num_registers_(registers_to_save),
@@ -126,7 +132,6 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -397,13 +402,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
__ push(backtrack_stackpointer());
- static const int num_arguments = 3;
+ static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
// Put arguments into parameter registers. Parameters are
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
+ // Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
__ lea(rcx, Operand(rsi, rdx, times_1, 0));
@@ -411,6 +417,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ lea(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
__ movq(r8, rbx);
+ // Isolate.
+ __ LoadAddress(r9, ExternalReference::isolate_address());
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
__ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -420,13 +428,15 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ movq(rsi, rax);
// Set byte_length.
__ movq(rdx, rbx);
+ // Isolate.
+ __ LoadAddress(rcx, ExternalReference::isolate_address());
#endif
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16();
+ ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
__ CallCFunction(compare, num_arguments);
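For orientation, the four argument registers set up above feed a C helper of roughly this shape (a hedged reconstruction from the comments; the real declaration lives in regexp-macro-assembler.h):

    // Returns non-zero when the byte_length bytes at the two addresses are
    // equal under case-insensitive uc16 comparison.
    int CaseInsensitiveCompareUC16(Address byte_offset1,
                                   Address byte_offset2,
                                   size_t byte_length,
                                   Isolate* isolate);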
// Restore original values before reacting on result value.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
__ pop(backtrack_stackpointer());
#ifndef _WIN64
__ pop(rdi);
@@ -693,7 +703,7 @@ void RegExpMacroAssemblerX64::Fail() {
}
-Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
// Entry code:
@@ -740,7 +750,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_.isolate());
__ movq(rcx, rsp);
__ movq(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
@@ -752,11 +762,11 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
- __ movq(rax, Immediate(EXCEPTION));
+ __ Set(rax, EXCEPTION);
__ jmp(&exit_label_);
__ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
@@ -789,7 +799,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
- __ movq(rcx, Immediate(kRegisterZero));
+ __ Set(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ movq(Operand(rbp, rcx, times_1, 0), rax);
@@ -811,7 +821,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Initialize backtrack stack pointer.
__ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
// Initialize code object pointer.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
// Load previous char as initial value of current-character.
Label at_start;
__ cmpb(Operand(rbp, kStartIndex), Immediate(0));
@@ -819,7 +829,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
- __ movq(current_character(), Immediate('\n'));
+ __ Set(current_character(), '\n');
__ jmp(&start_label_);
@@ -847,7 +857,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movl(Operand(rbx, i * kIntSize), rax);
}
}
- __ movq(rax, Immediate(SUCCESS));
+ __ Set(rax, SUCCESS);
}
// Exit and return rax
@@ -892,7 +902,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(not_zero, &exit_label_);
// Restore registers.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
__ pop(rdi);
__ pop(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
@@ -914,18 +924,21 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#endif
// Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 2;
+ static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments);
#ifdef _WIN64
- // Microsoft passes parameters in rcx, rdx.
+ // Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ LoadAddress(r8, ExternalReference::isolate_address());
#else
- // AMD64 ABI passes parameters in rdi, rsi.
+ // AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movq(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ LoadAddress(rdx, ExternalReference::isolate_address());
#endif
- ExternalReference grow_stack = ExternalReference::re_grow_stack();
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_.isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -934,7 +947,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Otherwise use return value as new stack pointer.
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
@@ -946,19 +959,20 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// If any of the code above needed to exit with an exception.
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ movq(rax, Immediate(EXCEPTION));
+ __ Set(rax, EXCEPTION);
__ jmp(&exit_label_);
}
FixupCodeRelativePositions();
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = Factory::NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
+ masm_.GetCode(&code_desc);
+ Isolate* isolate = ISOLATE;
+ Handle<Code> code = isolate->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP),
+ masm_.CodeObject());
+ PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
}
@@ -1051,9 +1065,9 @@ void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
- NearLabel after_position;
+ Label after_position;
__ cmpq(rdi, Immediate(-by * char_size()));
- __ j(greater_equal, &after_position);
+ __ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
// the current position is expected to be already loaded.
@@ -1126,7 +1140,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ lea(rdi, Operand(rsp, -kPointerSize));
#endif
ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state();
+ ExternalReference::re_check_stack_guard_state(masm_.isolate());
__ CallCFunction(stack_check, num_arguments);
}
@@ -1141,8 +1155,10 @@ static T& frame_entry(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
- if (StackGuard::IsStackOverflow()) {
- Top::StackOverflow();
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
return EXCEPTION;
}
@@ -1289,8 +1305,8 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
// Patch the relative offset to be relative to the Code object pointer
// instead.
int patch_position = position - kIntSize;
- int offset = masm_->long_at(patch_position);
- masm_->long_at_put(patch_position,
+ int offset = masm_.long_at(patch_position);
+ masm_.long_at_put(patch_position,
offset
+ position
+ Code::kHeaderSize
@@ -1324,7 +1340,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
// Check for preemption.
Label no_preempt;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit();
+ ExternalReference::address_of_stack_limit(masm_.isolate());
__ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
@@ -1338,7 +1354,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
void RegExpMacroAssemblerX64::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
+ ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
__ load_rax(stack_limit);
__ cmpq(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 421a229447..02b510fa07 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -75,7 +75,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
@@ -104,7 +104,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Handle<String> subject,
int* offsets_vector,
int offsets_vector_length,
- int previous_index);
+ int previous_index,
+ Isolate* isolate);
static Result Execute(Code* code,
String* input,
@@ -142,6 +143,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
@@ -153,6 +155,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kInputEnd - kPointerSize;
static const int kStackHighEnd = kRegisterOutput - kPointerSize;
static const int kDirectCall = kFrameAlign;
+ static const int kIsolate = kDirectCall + kPointerSize;
#endif
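
Aside: on the Win64 branch the new kIsolate slot simply extends the chain of word-sized frame offsets. A tiny worked example of that arithmetic; the base value chosen for kRegisterOutput is invented for the sketch, and kPointerSize is assumed to be 8 on x64.

    #include <iostream>

    int main() {
      const int kPointerSize    = 8;
      const int kRegisterOutput = 0;                               // assumed base
      const int kStackHighEnd   = kRegisterOutput + kPointerSize;  // +8
      const int kDirectCall     = kStackHighEnd + kPointerSize;    // +16
      const int kIsolate        = kDirectCall + kPointerSize;      // +24
      std::cout << "kIsolate sits " << (kIsolate - kDirectCall)
                << " bytes above kDirectCall\n";  // 8: one word, as declared above
    }
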
#ifdef _WIN64
@@ -215,7 +218,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_->pc_offset());
+ code_relative_fixup_positions_.Add(masm_.pc_offset());
}
void FixupCodeRelativePositions();
@@ -247,7 +250,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
- MacroAssembler* masm_;
+ MacroAssembler masm_;
+ MacroAssembler::NoRootArrayScope no_root_array_scope_;
ZoneList<int> code_relative_fixup_positions_;
diff --git a/deps/v8/src/x64/register-allocator-x64-inl.h b/deps/v8/src/x64/register-allocator-x64-inl.h
deleted file mode 100644
index c6bea3ab09..0000000000
--- a/deps/v8/src/x64/register-allocator-x64-inl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
- reg.is(kScratchRegister) || reg.is(kRootRegister) ||
- reg.is(kSmiConstantRegister);
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // rax
- 2, // rcx
- 3, // rdx
- 1, // rbx
- -1, // rsp Stack pointer.
- -1, // rbp Frame pointer.
- -1, // rsi Context.
- 4, // rdi
- 5, // r8
- 6, // r9
- -1, // r10 Scratch register.
- 8, // r11
- 9, // r12
- -1, // r13 Roots array. This is callee saved.
- 7, // r14
- -1 // r15 Smi constant register.
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved rdi register is live on JS function entry.
- Use(rdi); // JS function.
-}
-} } // namespace v8::internal
-
-#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/deps/v8/src/x64/register-allocator-x64.cc b/deps/v8/src/x64/register-allocator-x64.cc
deleted file mode 100644
index 1f5467e130..0000000000
--- a/deps/v8/src/x64/register-allocator-x64.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- ASSERT(is_valid());
- if (is_constant()) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
- // This result becomes a copy of the fresh one.
- fresh.set_type_info(type_info());
- *this = fresh;
- }
- ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
- ASSERT(is_valid());
- if (!is_register() || !reg().is(target)) {
- Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
- ASSERT(fresh.is_valid());
- if (is_register()) {
- CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
- } else {
- ASSERT(is_constant());
- CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
- }
- fresh.set_type_info(type_info());
- *this = fresh;
- } else if (is_register() && reg().is(target)) {
- ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
- CodeGeneratorScope::Current()->frame()->Spill(target);
- ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
- }
- ASSERT(is_register());
- ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // This function is not used in 64-bit code.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/register-allocator-x64.h b/deps/v8/src/x64/register-allocator-x64.h
deleted file mode 100644
index a2884d9128..0000000000
--- a/deps/v8/src/x64/register-allocator-x64.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- static const int kNumRegisters = 10;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index aa2994f267..df8423a654 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -40,12 +40,12 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
+ const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
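
Aside: the typedef and macro above now carry eight parameters, with Isolate* in the last slot. A self-contained model of that call shape, using stand-in types and a dummy entry point so it compiles on its own; FUNCTION_CAST is approximated with reinterpret_cast.

    #include <iostream>

    struct String;
    struct Isolate;
    using byte = unsigned char;
    using Address = byte*;

    typedef int (*regexp_matcher)(String*, int, const byte*,
                                  const byte*, int*, Address, int, Isolate*);

    // Dummy "generated code" entry point with the expected shape.
    int FakeMatcher(String*, int, const byte*, const byte*, int*, Address,
                    int direct_call, Isolate*) {
      return direct_call;  // just echoes one argument
    }

    #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
      (reinterpret_cast<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))

    int main() {
      int offsets[2] = {0, 0};
      std::cout << CALL_GENERATED_REGEXP_CODE(&FakeMatcher, nullptr, 0, nullptr,
                                              nullptr, offsets, nullptr, 1,
                                              nullptr)
                << "\n";  // prints 1: the direct_call argument came back out
    }
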
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
@@ -55,7 +55,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
return c_limit;
}
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 109985c726..da27fdf050 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -39,7 +39,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(MacroAssembler* masm,
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
@@ -48,10 +49,10 @@ static void ProbeTable(MacroAssembler* masm,
ASSERT_EQ(16, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
- ExternalReference key_offset(SCTableReference::keyReference(table));
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
Label miss;
- __ movq(kScratchRegister, key_offset);
+ __ LoadAddress(kScratchRegister, key_offset);
// Check that the key in the entry matches the name.
// Multiply entry offset by 16 to get the entry address. Since the
// offset register already holds the entry offset times four, multiply
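
Aside: the scaling remarked on above is fixed-point bookkeeping: the probe value is stored pre-multiplied by 4, and each StubCache entry is 16 bytes (per the ASSERT), so the addressing mode only needs to scale it by another 4. A one-line check of that arithmetic:

    #include <iostream>

    int main() {
      const int kEntrySize = 16;            // sizeof(StubCache::Entry), per ASSERT
      int entry_index = 5;                  // hypothetical probe result
      int stored_offset = entry_index * 4;  // what the offset register holds
      int byte_offset = stored_offset * 4;  // what [table + offset*4] addresses
      std::cout << (byte_offset == entry_index * kEntrySize) << "\n";  // prints 1
    }
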
@@ -81,17 +82,18 @@ static void ProbeTable(MacroAssembler* masm,
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- String* name,
- Register r0,
- Register r1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register r1) {
ASSERT(name->IsSymbol());
- __ IncrementCounter(&Counters::negative_lookups, 1);
- __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
- Label done;
__ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
@@ -103,7 +105,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -115,64 +117,20 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Heap::kHashTableMapRootIndex);
__ j(not_equal, miss_label);
- // Compute the capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- // Generate an unrolled loop that performs a few probes before
- // giving up.
- static const int kProbes = 4;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the null value).
- for (int i = 0; i < kProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r1;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r1;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties, index, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, Factory::undefined_value());
- // __ jmp(miss_label);
- if (i != kProbes - 1) {
- __ j(equal, &done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
- __ j(equal, miss_label);
-
- // Check if the entry name is not a symbol.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, miss_label);
- } else {
- // Give up probing if still not found the undefined value.
- __ j(not_equal, miss_label);
- }
- }
+ Label done;
+ MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+ masm,
+ miss_label,
+ &done,
+ properties,
+ name,
+ r1);
+ if (result->IsFailure()) return result;
__ bind(&done);
- __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+ return result;
}
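
Aside: the unrolled loop deleted above, and the StringDictionaryLookupStub::GenerateNegativeLookup call that replaces it, walk the same probe sequence over a power-of-two dictionary: index = (hash + probe offset) & mask, three words per entry. A standalone sketch of that walk; the triangular-number probe offset is an assumption standing in for StringDictionary::GetProbeOffset.

    #include <cstdint>
    #include <iostream>

    // Stand-in for StringDictionary::GetProbeOffset(i); assumed here to be the
    // usual triangular sequence so successive probes land on distinct slots.
    uint32_t ProbeOffset(uint32_t i) { return (i + i * i) >> 1; }

    int main() {
      const uint32_t capacity = 64;        // dictionary capacity, a power of two
      const uint32_t mask = capacity - 1;
      const uint32_t kEntrySize = 3;       // words per StringDictionary entry
      const uint32_t kProbes = 4;          // the count the unrolled loop used
      uint32_t hash = 0x9e3779b9u;         // hypothetical symbol hash
      for (uint32_t i = 0; i < kProbes; i++) {
        uint32_t index = (hash + ProbeOffset(i)) & mask;
        std::cout << "probe " << i << ": entry " << index
                  << ", word offset " << index * kEntrySize << "\n";
      }
    }
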
@@ -183,6 +141,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
+ Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
@@ -212,7 +171,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -224,7 +183,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
@@ -253,13 +212,15 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
// Check we're still in the same context.
- __ Move(prototype, Top::global());
+ __ Move(prototype, isolate->global());
__ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
prototype);
__ j(not_equal, miss);
// Get the global function with the given index.
- JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
+ JSFunction* function =
+ JSFunction::cast(isolate->global_context()->get(index));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@@ -375,7 +336,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
__ Move(kScratchRegister, Handle<Object>(interceptor));
__ push(kScratchRegister);
__ push(receiver);
@@ -392,9 +353,10 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
- __ movq(rax, Immediate(5));
- __ movq(rbx, ref);
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+ masm->isolate());
+ __ Set(rax, 5);
+ __ LoadAddress(rbx, ref);
CEntryStub stub(1);
__ CallStub(&stub);
@@ -466,7 +428,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (Heap::InNewSpace(call_data)) {
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
__ Move(rcx, api_call_info_handle);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
@@ -516,10 +478,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
- Register name)
+ Register name,
+ Code::ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name) {}
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
@@ -561,7 +525,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
- return Heap::undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
}
@@ -597,10 +561,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
(depth2 != kInvalidProtoDepth);
}
- __ IncrementCounter(&Counters::call_const_interceptor, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->call_const_interceptor(), 1);
if (can_do_fast_api_call) {
- __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
ReserveSpaceForFastApiCall(masm, scratch1);
}
@@ -643,8 +608,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_.immediate());
if (result->IsFailure()) return result;
} else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -660,7 +628,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
FreeSpaceForFastApiCall(masm, scratch1);
}
- return Heap::undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
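
Aside: the call-kind selection added above decodes one field out of the packed extra_ic_state_ word. A self-contained model of that style of bit-field decode; the template shape mirrors V8's BitField utility, but the shift and width chosen for Contextual here are assumptions of the sketch.

    #include <cstdint>
    #include <iostream>

    template <typename T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t state) {
        return static_cast<T>((state >> shift) & ((1u << size) - 1));
      }
    };

    enum CallKind { CALL_AS_METHOD, CALL_AS_FUNCTION };
    using Contextual = BitField<bool, 0, 1>;  // assumed position and width

    int main() {
      uint32_t extra_ic_state = Contextual::encode(true);
      CallKind call_kind = Contextual::decode(extra_ic_state) ? CALL_AS_FUNCTION
                                                              : CALL_AS_METHOD;
      std::cout << (call_kind == CALL_AS_FUNCTION) << "\n";  // prints 1
    }
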
void CompileRegular(MacroAssembler* masm,
@@ -688,7 +656,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
interceptor_holder);
__ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+ masm->isolate()),
5);
// Restore the name_ register.
@@ -722,6 +691,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
+ Code::ExtraICState extra_ic_state_;
};
@@ -729,9 +699,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
} else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
}
Handle<Code> ic(code);
@@ -739,6 +709,14 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+ Code* code = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -776,7 +754,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(rax);
__ push(scratch);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
return;
}
@@ -836,7 +817,7 @@ MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, Handle<Object>(cell));
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Factory::the_hole_value());
+ masm->isolate()->factory()->the_hole_value());
__ j(not_equal, miss);
return cell;
}
@@ -885,7 +866,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* lookup_result = Heap::LookupSymbol(name);
+ MaybeObject* lookup_result = heap()->LookupSymbol(name);
if (lookup_result->IsFailure()) {
set_failure(Failure::cast(lookup_result));
return reg;
@@ -896,16 +877,21 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
ASSERT(current->property_dictionary()->FindEntry(name) ==
StringDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(),
- miss,
- reg,
- name,
- scratch1,
- scratch2);
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ if (negative_lookup->IsFailure()) {
+ set_failure(Failure::cast(negative_lookup));
+ return reg;
+ }
+
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (Heap::InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ Cmp(scratch1, Handle<Map>(current->map()));
@@ -956,7 +942,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
__ j(not_equal, miss);
// Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Perform security check for access to the global object and return
// the holder register.
@@ -1039,7 +1025,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
__ push(receiver); // receiver
__ push(reg); // holder
- if (Heap::InNewSpace(callback_handle->data())) {
+ if (heap()->InNewSpace(callback_handle->data())) {
__ Move(scratch1, callback_handle);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
@@ -1230,7 +1216,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ push(scratch2); // restore return address
ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+ isolate());
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
@@ -1245,7 +1232,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ push(scratch2); // restore old return address
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
__ TailCallExternalReference(ref, 5, 1);
}
}
@@ -1291,7 +1278,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (Heap::InNewSpace(function)) {
+ if (heap()->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1313,8 +1300,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
- kind_);
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_ic_state_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1365,14 +1354,16 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
}
// Invoke the function.
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(FIELD, name);
@@ -1393,7 +1384,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss;
@@ -1427,7 +1418,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::fixed_array_map());
+ factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
@@ -1477,14 +1468,13 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top.
- __ movq(rcx, new_space_allocation_top);
- __ movq(rcx, Operand(rcx, 0));
+ __ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
__ lea(rdx, FieldOperand(rbx,
@@ -1493,13 +1483,13 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- __ movq(kScratchRegister, new_space_allocation_limit);
- __ cmpq(rcx, Operand(kScratchRegister, 0));
+ Operand limit_operand =
+ masm()->ExternalOperand(new_space_allocation_limit);
+ __ cmpq(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(Operand(kScratchRegister, 0), rcx);
+ __ Store(new_space_allocation_top, rcx);
__ movq(rcx, Operand(rsp, argc * kPointerSize));
// Push the argument...
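
Aside: the hunk above rewrites the in-place growth of the elements store to go through the Load/ExternalOperand/Store helpers, but the underlying check is the classic bump-pointer test. A standalone sketch of that test, with plain integers standing in for the new-space top and limit:

    #include <cstdint>
    #include <iostream>

    // Growth succeeds only if the elements end exactly at the allocation top and
    // bumping the top by allocation_delta_words stays within the limit.
    bool CanGrowInPlace(uintptr_t elements_end, uintptr_t* top, uintptr_t limit,
                        int allocation_delta_words) {
      const int kPointerSize = 8;                // x64
      if (elements_end != *top) return false;    // someone else allocated meanwhile
      uintptr_t new_top = *top + allocation_delta_words * kPointerSize;
      if (new_top > limit) return false;         // would spill past new space
      *top = new_top;                            // commit the bump
      return true;
    }

    int main() {
      uintptr_t top = 0x1000;
      std::cout << CanGrowInPlace(0x1000, &top, 0x2000, 4)
                << " new top=0x" << std::hex << top << "\n";  // 1 new top=0x1020
    }
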
@@ -1526,16 +1516,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
}
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+ isolate()),
argc + 1,
1);
}
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1556,7 +1545,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss, return_undefined, call_builtin;
@@ -1611,15 +1600,14 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
- argc + 1,
- 1);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPop, isolate()),
+ argc + 1,
+ 1);
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1641,7 +1629,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1650,7 +1638,9 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1700,10 +1690,8 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1725,7 +1713,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return Heap::undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1734,7 +1722,9 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
@@ -1786,10 +1776,8 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -1814,7 +1802,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1853,14 +1841,16 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
__ bind(&miss);
// rcx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
@@ -1873,7 +1863,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
JSFunction* function,
String* name) {
// TODO(872): implement this.
- return Heap::undefined_value();
+ return heap()->undefined_value();
}
@@ -1894,7 +1884,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1943,7 +1933,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
- __ CheckMap(rax, Factory::heap_number_map(), &slow, true);
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
@@ -1968,20 +1958,81 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
__ bind(&miss);
// rcx: function name.
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ ASSERT(optimization.is_simple_api_call());
+ // Bail out if object is a global object as we don't want to
+ // repatch it to global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(rdx, &miss_before_stack_reserved);
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->call_const(), 1);
+ __ IncrementCounter(counters->call_const_fast_api(), 1);
+
+ // Allocate space for v8::Arguments implicit values. Must be initialized
+ // before calling any runtime function.
+ __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
+
+ // Check that the maps haven't changed and find a Holder as a side effect.
+ CheckPrototypes(JSObject::cast(object), rdx, holder,
+ rbx, rax, rdi, name, depth, &miss);
+
+ // Move the return address on top of the stack.
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ __ movq(Operand(rsp, 0 * kPointerSize), rax);
+
+ MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1997,20 +2048,18 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, NULL, function, name);
+ object, holder, NULL, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) return result;
}
- Label miss_in_smi_check;
+ Label miss;
- GenerateNameCheck(name, &miss_in_smi_check);
+ GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
const int argc = arguments().immediate();
@@ -2018,42 +2067,26 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss_in_smi_check);
+ __ JumpIfSmi(rdx, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- CallOptimization optimization(function);
- int depth = kInvalidProtoDepth;
- Label miss;
-
+ Counters* counters = isolate()->counters();
+ SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(&Counters::call_const, 1);
-
- if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
- depth = optimization.GetPrototypeDepthOfExpectedType(
- JSObject::cast(object), holder);
- }
-
- if (depth != kInvalidProtoDepth) {
- __ IncrementCounter(&Counters::call_const_fast_api, 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before to call any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- }
+ __ IncrementCounter(counters->call_const(), 1);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), rdx, holder,
- rbx, rax, rdi, name, depth, &miss);
+ rbx, rax, rdi, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
- ASSERT(depth == kInvalidProtoDepth);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
@@ -2123,31 +2156,16 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- if (depth != kInvalidProtoDepth) {
- // Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- __ movq(Operand(rsp, 0 * kPointerSize), rax);
-
- // rsp[2 * kPointerSize] is uninitialized, rsp[3 * kPointerSize] contains
- // duplicate of return address and will be overwritten.
- MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
- if (result->IsFailure()) return result;
- } else {
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
- }
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
- if (depth != kInvalidProtoDepth) {
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- }
-
- // Handle call cache miss.
- __ bind(&miss_in_smi_check);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(function);
@@ -2179,7 +2197,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), rcx);
+ CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
@@ -2209,14 +2227,16 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Invoke the function.
__ movq(rdi, rax);
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
// Handle load cache miss.
__ bind(&miss);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(INTERCEPTOR, name);
@@ -2238,11 +2258,9 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- SharedFunctionInfo* function_info = function->shared();
- if (function_info->HasBuiltinFunctionId()) {
- BuiltinFunctionId id = function_info->builtin_function_id();
+ if (HasCustomCallGenerator(function)) {
MaybeObject* maybe_result = CompileCustomCall(
- id, object, holder, cell, function, name);
+ object, holder, cell, function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// undefined means bail out to regular compiler.
@@ -2270,27 +2288,31 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1);
ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
__ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
+ __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
} else {
Handle<Code> code(function->code());
__ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
}
// Handle call cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::call_global_inline_miss, 1);
- Object* obj;
- { MaybeObject* maybe_obj = GenerateMissBranch();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ __ IncrementCounter(counters->call_global_inline_miss(), 1);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
// Return the generated code.
return GetCode(NORMAL, name);
@@ -2319,7 +2341,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2364,12 +2386,12 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2413,12 +2435,12 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2455,13 +2477,14 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2481,7 +2504,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_store_field, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
__ Cmp(rcx, Handle<String>(name));
@@ -2497,8 +2521,8 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_store_field, 1);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ DecrementCounter(counters->keyed_store_field(), 1);
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2506,56 +2530,22 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map matches.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &miss);
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ j(not_equal, &miss);
-
- // Check that the key is within bounds.
- if (receiver->IsJSArray()) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ j(above_equal, &miss);
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss);
- }
-
- // Do the store and update the write barrier. Make sure to preserve
- // the value in register eax.
- __ movq(rdx, rax);
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ RecordWrite(rdi, 0, rdx, rcx);
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(rdx,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2563,8 +2553,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
-MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
- JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -2572,31 +2563,26 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
// -- rsp[0] : return address
// -----------------------------------
Label miss;
+ __ JumpIfSmi(rdx, &miss);
- // Check that the map matches.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
-
- // Do the load.
- GenerateFastPixelArrayStore(masm(),
- rdx,
- rcx,
- rax,
- rdi,
- rbx,
- true,
- false,
- &miss,
- &miss,
- NULL,
- &miss);
+ Register map_reg = rbx;
+ __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ // Check map and tail call if there's a match
+ Handle<Map> map(receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ __ j(equal,
+ Handle<Code>(handler_ics->at(current)),
+ RelocInfo::CODE_TARGET);
+ }
- // Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
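For readers skimming the diff: the new megamorphic store/load stubs above replace the single-map specialized path with a per-map dispatch -- load the receiver's map once, compare it against every map the IC has seen, and tail-jump to the matching handler, otherwise fall through to the miss builtin. A minimal standalone sketch of that dispatch shape in plain C++ (the Map, Handler and Receiver types are invented for illustration; nothing here is V8 API):

#include <cstddef>
#include <utility>
#include <vector>

struct Map {};                               // stand-in for the receiver's hidden class
using Handler = void (*)(void* receiver);    // stand-in for a compiled handler stub

struct Receiver { const Map* map; };

// Returns true if a known map matched; returning false corresponds to the
// fall-through to the miss label and the jump to KeyedStoreIC_Miss above.
bool DispatchByMap(Receiver* receiver,
                   const std::vector<std::pair<const Map*, Handler> >& handlers) {
  for (size_t i = 0; i < handlers.size(); ++i) {
    if (receiver->map == handlers[i].first) {   // __ Cmp(map_reg, map)
      handlers[i].second(receiver);             // __ j(equal, handler, CODE_TARGET)
      return true;
    }
  }
  return false;                                 // __ bind(&miss)
}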
@@ -2610,7 +2596,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
// -----------------------------------
Label miss;
- // Chech that receiver is not a smi.
+ // Check that receiver is not a smi.
__ JumpIfSmi(rax, &miss);
// Check the maps of the full prototype chain. Also check that
@@ -2641,7 +2627,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, Heap::empty_string());
+ return GetCode(NONEXISTENT, heap()->empty_string());
}
@@ -2780,12 +2766,13 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- __ IncrementCounter(&Counters::named_load_global_stub, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
__ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
@@ -2804,7 +2791,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_field, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2813,7 +2801,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_field, 1);
+ __ DecrementCounter(counters->keyed_load_field(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2833,7 +2821,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_callback, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2848,7 +2837,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ __ DecrementCounter(counters->keyed_load_callback(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2867,7 +2856,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2876,7 +2866,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
value, name, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ __ DecrementCounter(counters->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2894,7 +2884,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2913,7 +2904,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
name,
&miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ __ DecrementCounter(counters->keyed_load_interceptor(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2929,7 +2920,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2937,7 +2929,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ __ DecrementCounter(counters->keyed_load_array_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2953,7 +2945,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2961,7 +2954,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2977,7 +2970,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
@@ -2985,7 +2979,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
- __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
@@ -2993,78 +2987,56 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- esp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Check that the map matches.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &miss);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(rdx,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
- // -- esp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
Label miss;
+ __ JumpIfSmi(rdx, &miss);
- // Check that the map matches.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
-
- GenerateFastPixelArrayLoad(masm(),
- rdx,
- rax,
- rbx,
- rcx,
- rax,
- &miss,
- &miss,
- &miss);
+ Register map_reg = rbx;
+ __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int current = 0; current < receiver_count; ++current) {
+ // Check map and tail call if there's a match
+ Handle<Map> map(receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ __ j(equal,
+ Handle<Code>(handler_ics->at(current)),
+ RelocInfo::CODE_TARGET);
+ }
- __ bind(&miss);
+ __ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(NORMAL, NULL);
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
}
@@ -3080,7 +3052,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
Label generic_stub_call;
// Use r8 for holding undefined, which is used in several places below.

- __ Move(r8, Factory::undefined_value());
+ __ Move(r8, factory()->undefined_value());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
@@ -3124,7 +3096,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// rbx: initial map
// rdx: JSObject (untagged)
__ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, Factory::empty_fixed_array());
+ __ Move(rbx, factory()->empty_fixed_array());
__ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
__ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
@@ -3183,14 +3155,16 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
- __ IncrementCounter(&Counters::constructed_objects, 1);
- __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
+ __ IncrementCounter(counters->constructed_objects_stub(), 1);
__ ret(0);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
- Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Code* code =
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -3199,45 +3173,32 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow;
+ Label slow, miss_force_generic;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
-
- // Check that the object is a JS object.
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: index (as a smi)
- // rdx: JSObject
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
+ __ JumpIfNotSmi(rax, &miss_force_generic);
// Check that the index is in range.
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rcx, rax);
__ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
+ __ j(above_equal, &miss_force_generic);
// rax: index (as a smi)
// rdx: receiver (JSObject)
@@ -3245,28 +3206,32 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
+ break;
default:
UNREACHABLE();
break;
@@ -3280,13 +3245,13 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
- NearLabel box_int;
+ Label box_int;
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
@@ -3304,7 +3269,8 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
@@ -3319,7 +3285,8 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// Slow case: Jump to runtime.
__ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
// -- rax : key
@@ -3327,60 +3294,46 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+ // Miss case: Jump to runtime.
+ __ bind(&miss_force_generic);
- // Return the generated code.
- return GetCode(flags);
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
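The switch in GenerateLoadExternalArray is doing per-kind width and signedness selection before the result is either smi-tagged or boxed in a HeapNumber. A rough C++ analogue of just that selection (the ExternalKind enum and raw-buffer layout are assumptions for illustration, not V8 declarations):

#include <cstdint>
#include <cstring>

enum class ExternalKind { Int8, Uint8, Int16, Uint16, Int32, Uint32, Float32, Float64 };

double LoadExternalElement(const uint8_t* base, size_t index, ExternalKind kind) {
  switch (kind) {
    case ExternalKind::Int8:    return static_cast<int8_t>(base[index]);                        // movsxbq
    case ExternalKind::Uint8:   return base[index];                                             // movzxbq
    case ExternalKind::Int16:  { int16_t v;  std::memcpy(&v, base + 2 * index, 2); return v; }  // movsxwq
    case ExternalKind::Uint16: { uint16_t v; std::memcpy(&v, base + 2 * index, 2); return v; }  // movzxwq
    case ExternalKind::Int32:  { int32_t v;  std::memcpy(&v, base + 4 * index, 4); return v; }  // movsxlq
    case ExternalKind::Uint32: { uint32_t v; std::memcpy(&v, base + 4 * index, 4); return v; }  // movl
    case ExternalKind::Float32:{ float v;    std::memcpy(&v, base + 4 * index, 4); return v; }  // cvtss2sd
    case ExternalKind::Float64:{ double v;   std::memcpy(&v, base + 8 * index, 8); return v; }  // movsd
  }
  return 0;  // unreachable
}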
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow;
+ Label slow, miss_force_generic;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
- // Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
-
- // Check that the object is a JS object.
- __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &miss_force_generic);
// Check that the index is in range.
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rdi, rcx); // Untag the index.
__ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
+ __ j(above_equal, &miss_force_generic);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
@@ -3389,92 +3342,127 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
- NearLabel check_heap_number;
- __ JumpIfNotSmi(rax, &check_heap_number);
+ Label check_heap_number;
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // Float to pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(rax, &slow);
+ } else {
+ __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
+ }
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ { // Clamp the value to [0..255].
+ Label done;
+ __ testl(rdx, Immediate(0xFFFFFF00));
+ __ j(zero, &done, Label::kNear);
+ __ setcc(negative, rdx); // 1 if negative, 0 if positive.
+ __ decb(rdx); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Need to perform int-to-float conversion.
__ cvtlsi2ss(xmm0, rdx);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
- default:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ // Need to perform int-to-double conversion.
+ __ cvtlsi2sd(xmm0, rdx);
+ __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
__ ret(0);
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // Convert to int32 and store the low byte/word.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- // rdx: value (converted to an untagged integer)
+ // TODO(danno): handle heap number -> pixel array conversion
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ __ bind(&check_heap_number);
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
+ __ j(not_equal, &slow);
+ // No more branches to slow case on this path.
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ cvttsd2si(rdx, xmm0);
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ cvttsd2si(rdx, xmm0);
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // Convert to int64, so that NaN and infinities become
- // 0x8000000000000000, which is zero mod 2^32.
- __ cvttsd2siq(rdx, xmm0);
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
+ // xmm0: value as double
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvtsd2ss(xmm0, xmm0);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ __ ret(0);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // Convert to int32 and store the low byte/word.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ // rdx: value (converted to an untagged integer)
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ cvttsd2si(rdx, xmm0);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ cvttsd2si(rdx, xmm0);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ // Convert to int64, so that NaN and infinities become
+ // 0x8000000000000000, which is zero mod 2^32.
+ __ cvttsd2siq(rdx, xmm0);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
+ __ ret(0);
}
- __ ret(0);
}
// Slow case: call runtime.
@@ -3487,21 +3475,116 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(
- Code::ExtractExtraICStateFromFlags(flags) & kStrictMode));
- __ push(rbx); // return address
+ Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic, RelocInfo::CODE_TARGET);
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+ // Miss case: call runtime.
+ __ bind(&miss_force_generic);
- return GetCode(flags);
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
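Two details in GenerateStoreExternalArray are worth spelling out: pixel stores clamp the untagged integer to [0..255] (the testl / setcc(negative) / decb sequence), and heap-number stores into integer kinds truncate toward zero, with cvttsd2siq mapping NaN and infinities to 0x8000000000000000, which is 0 mod 2^32. A small C++ sketch of the same arithmetic (function names are illustrative only):

#include <cmath>
#include <cstdint>

// Pixel store: in-range values pass through, negatives clamp to 0, larger
// values clamp to 255 -- the same result the setcc/decb trick produces.
uint8_t ClampToPixel(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);  // testl(rdx, 0xFFFFFF00)
  return value < 0 ? 0 : 255;                                    // setcc(negative, rdx); decb(rdx)
}

// Integer-kind store of a double: truncate toward zero; NaN, infinities and
// out-of-range values collapse to 0 once taken mod 2^32, matching the
// "convert these to zero" comment in the stub.
uint32_t TruncateForIntArray(double value) {
  if (!(std::fabs(value) < 9223372036854775808.0)) return 0;  // NaN/Inf/out of range
  return static_cast<uint32_t>(static_cast<int64_t>(value));
}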
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &miss_force_generic);
+
+ // Get the elements array.
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ AssertFastElements(rcx);
+
+ // Check that the key is within bounds.
+ __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+
+ // Load the result and make sure it's not the hole.
+ SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+ __ movq(rbx, FieldOperand(rcx,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &miss_force_generic);
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&miss_force_generic);
+ Code* code = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ Handle<Code> ic(code);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
}
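GenerateLoadFastElement above is essentially three checks and a load: the key must be a smi, below the FixedArray length, and the loaded slot must not be the hole; any failure forces the generic IC. The same shape in ordinary C++, with std::optional standing in for the hole sentinel (purely illustrative types; the smi check has no C++ analogue here):

#include <cstddef>
#include <optional>
#include <vector>

// nullopt plays the role of the one-of-a-kind "the hole" value.
using Element = std::optional<int>;

// Returns nullopt when the access must fall back to the generic keyed IC,
// mirroring the miss_force_generic path in the stub.
std::optional<int> LoadFastElement(const std::vector<Element>& elements, size_t key) {
  if (key >= elements.size()) return std::nullopt;      // SmiCompare against length
  if (!elements[key].has_value()) return std::nullopt;  // CompareRoot(..., kTheHoleValueRootIndex)
  return elements[key];                                 // movq(rax, rbx); ret(0)
}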
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array, not 'cow'.
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &miss_force_generic);
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+ } else {
+ __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+ }
+
+ // Do the store and update the write barrier. Make sure to preserve
+ // the value in register rax.
+ __ movq(rdx, rax);
+ __ SmiToInteger32(rcx, rcx);
+ __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ RecordWrite(rdi, 0, rdx, rcx);
+
+ // Done.
+ __ ret(0);
+
+ // Handle store cache miss.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic_force_generic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
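The store path in GenerateStoreFastElement ends with RecordWrite because a generational collector has to learn about pointers written into an existing object; the stub copies the value to rdx first precisely so rax survives the barrier. A heavily simplified model of that ordering (the RememberedSet type is invented for illustration and is not how V8's barrier is actually implemented):

#include <vector>

struct RememberedSet {
  std::vector<void**> slots;
  void Record(void** slot) { slots.push_back(slot); }  // GC will rescan these slots
};

// Store first, then tell the collector which slot changed -- the same order
// as movq(FieldOperand(...), rax) followed by RecordWrite in the stub.
void StoreWithWriteBarrier(void** slot, void* value, RememberedSet* remembered) {
  *slot = value;
  remembered->Record(slot);
}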
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
deleted file mode 100644
index c4d7e65663..0000000000
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ /dev/null
@@ -1,1292 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::Enter() {
- // Registers live on entry to a JS frame:
- // rsp: stack pointer, points to return address from this function.
- // rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
- // Trampoline frame.
- // rsi: context of this function call.
- // rdi: pointer to this function object.
- Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
- if (FLAG_debug_code) {
- // Verify that rdi contains a JS function. The following code
- // relies on rax being available for use.
- Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
- __ Check(not_smi,
- "VirtualFrame::Enter - rdi is not a function (smi check).");
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ Check(equal,
- "VirtualFrame::Enter - rdi is not a function (map check).");
- }
-#endif
-
- EmitPush(rbp);
-
- __ movq(rbp, rsp);
-
- // Store the context in the frame. The context is kept in rsi and a
- // copy is stored in the frame. The external reference to rsi
- // remains.
- EmitPush(rsi);
-
- // Store the function in the frame. The frame owns the register
- // reference now (i.e., it can keep it in rdi or spill it later).
- Push(rdi);
- SyncElementAt(element_count() - 1);
- cgen()->allocator()->Unuse(rdi);
-}
-
-
-void VirtualFrame::Exit() {
- Comment cmnt(masm(), "[ Exit JS frame");
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be at least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See GenerateReturnSequence for the full return sequence.
- // TODO(X64): A patched call will be very long now. Make sure we
- // have enough room.
- __ movq(rsp, rbp);
- stack_pointer_ = frame_pointer();
- for (int i = element_count() - 1; i > stack_pointer_; i--) {
- FrameElement last = elements_.RemoveLast();
- if (last.is_register()) {
- Unuse(last.reg());
- }
- }
-
- EmitPop(rbp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- // The locals are initialized to a constant (the undefined value), but
- // we sync them with the actual frame to allocate space for spilling
- // them later. First sync everything above the stack pointer so we can
- // use pushes to allocate and initialize the locals.
- SyncRange(stack_pointer_ + 1, element_count() - 1);
- Handle<Object> undefined = Factory::undefined_value();
- FrameElement initial_value =
- FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count < kLocalVarBound) {
- // For fewer locals the unrolled loop is more compact.
-
- // Hope for one of the first eight registers, where the push operation
- // takes only one byte (kScratchRegister needs the REX.B bit).
- Result tmp = cgen()->allocator()->Allocate();
- ASSERT(tmp.is_valid());
- __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
- for (int i = 0; i < count; i++) {
- __ push(tmp.reg());
- }
- } else {
- // For more locals a loop in generated code is more compact.
- Label alloc_locals_loop;
- Result cnt = cgen()->allocator()->Allocate();
- ASSERT(cnt.is_valid());
- __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
-#ifdef DEBUG
- Label loop_size;
- __ bind(&loop_size);
-#endif
- if (is_uint8(count)) {
- // Loading imm8 is shorter than loading imm32.
- // Loading only partial byte register, and using decb below.
- __ movb(cnt.reg(), Immediate(count));
- } else {
- __ movl(cnt.reg(), Immediate(count));
- }
- __ bind(&alloc_locals_loop);
- __ push(kScratchRegister);
- if (is_uint8(count)) {
- __ decb(cnt.reg());
- } else {
- __ decl(cnt.reg());
- }
- __ j(not_zero, &alloc_locals_loop);
-#ifdef DEBUG
- CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
-#endif
- }
- for (int i = 0; i < count; i++) {
- elements_.Add(initial_value);
- stack_pointer_++;
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ movq(Operand(rbp, fp_relative(context_index())), rsi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- ASSERT(elements_[context_index()].is_memory());
- __ movq(rsi, Operand(rbp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ lea(temp.reg(), ParameterAt(-1));
- Push(&temp);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(const Operand& operand) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ push(immediate);
-}
-
-
-void VirtualFrame::EmitPush(Smi* smi_value) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
- stack_pointer_++;
- __ Push(smi_value);
-}
-
-
-void VirtualFrame::EmitPush(Handle<Object> value) {
- ASSERT(stack_pointer_ == element_count() - 1);
- TypeInfo info = TypeInfo::TypeFromValue(value);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ Push(value);
-}
-
-
-void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(info));
- stack_pointer_++;
- __ PushRoot(index);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
- ASSERT(expr->IsTrivial());
-
- Literal* lit = expr->AsLiteral();
- if (lit != NULL) {
- Push(lit->handle());
- return;
- }
-
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL) {
- Slot* slot = proxy->var()->AsSlot();
- if (slot->type() == Slot::LOCAL) {
- PushLocalAt(slot->index());
- return;
- }
- if (slot->type() == Slot::PARAMETER) {
- PushParameterAt(slot->index());
- return;
- }
- }
- UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
- if (ConstantPoolOverflowed()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- if (value->IsSmi()) {
- __ Move(temp.reg(), Smi::cast(*value));
- } else {
- __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
- }
- Push(&temp);
- } else {
- FrameElement element =
- FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
- elements_.Add(element);
- }
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ addq(rsp, Immediate(num_dropped * kPointerSize));
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
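In the deleted VirtualFrame::Drop above, only elements that actually exist on the machine stack cost an rsp adjustment; elements that live purely in the virtual frame are simply forgotten. The arithmetic, pulled out into a standalone helper (simplified stand-in types, not the real FrameElement machinery):

struct FrameShape {
  int element_count;   // total virtual-frame elements
  int stack_pointer;   // index of the highest element present on the real stack
};

// Bytes the generated code must add to rsp for Drop(count); kPointerSize is 8 on x64.
int RspAdjustmentForDrop(const FrameShape& frame, int count) {
  const int kPointerSize = 8;
  int virtual_elements = (frame.element_count - 1) - frame.stack_pointer;
  int dropped_from_stack = count > virtual_elements ? count - virtual_elements : 0;
  return dropped_from_stack * kPointerSize;
}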
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- FrameElement original = elements_[index];
-
- // Is this element the backing store of any copies?
- int new_backing_index = kIllegalIndex;
- if (original.is_copied()) {
- // Verify it is copied, and find first copy.
- for (int i = index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- new_backing_index = i;
- break;
- }
- }
- }
-
- if (new_backing_index == kIllegalIndex) {
- // No copies found, return kIllegalIndex.
- if (original.is_register()) {
- Unuse(original.reg());
- }
- elements_[index] = FrameElement::InvalidElement();
- return kIllegalIndex;
- }
-
- // This is the backing store of copies.
- Register backing_reg;
- if (original.is_memory()) {
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- Use(fresh.reg(), new_backing_index);
- backing_reg = fresh.reg();
- __ movq(backing_reg, Operand(rbp, fp_relative(index)));
- } else {
- // The original was in a register.
- backing_reg = original.reg();
- set_register_location(backing_reg, new_backing_index);
- }
- // Invalidate the element at index.
- elements_[index] = FrameElement::InvalidElement();
- // Set the new backing element.
- if (elements_[new_backing_index].is_synced()) {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::SYNCED,
- original.type_info());
- } else {
- elements_[new_backing_index] =
- FrameElement::RegisterElement(backing_reg,
- FrameElement::NOT_SYNCED,
- original.type_info());
- }
- // Update the other copies.
- for (int i = new_backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == index) {
- elements_[i].set_index(new_backing_index);
- elements_[new_backing_index].set_copied();
- }
- }
- return new_backing_index;
-}
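InvalidateFrameSlotAt has one subtle job: if the slot being invalidated is the backing store of copies, the first copy is promoted to become the new backing element and every later copy is re-pointed at it. A compact model of just that index juggling (indices stand in for FrameElements; -1 plays the role of kIllegalIndex):

#include <vector>

// copy_of[i] holds the backing index for element i, or -1 if i is not a copy.
// Returns the new backing index, or -1 when the invalidated slot had no copies.
int PromoteFirstCopy(std::vector<int>& copy_of, int index) {
  int new_backing = -1;
  for (int i = index + 1; i < static_cast<int>(copy_of.size()); ++i) {
    if (copy_of[i] == index) { new_backing = i; break; }   // first copy found
  }
  if (new_backing == -1) return -1;                        // plain invalidation
  copy_of[new_backing] = -1;                               // it is now a real element
  for (int i = new_backing + 1; i < static_cast<int>(copy_of.size()); ++i) {
    if (copy_of[i] == index) copy_of[i] = new_backing;     // re-point later copies
  }
  return new_backing;
}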
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- ASSERT(index >= 0);
- ASSERT(index <= element_count());
- FrameElement original = elements_[index];
- int new_backing_store_index = InvalidateFrameSlotAt(index);
- if (new_backing_store_index != kIllegalIndex) {
- elements_.Add(CopyElementAt(new_backing_store_index));
- return;
- }
-
- switch (original.type()) {
- case FrameElement::MEMORY: {
- // Emit code to load the original element's data into a register.
- // Push that register as a FrameElement on top of the frame.
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid());
- FrameElement new_element =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- original.type_info());
- Use(fresh.reg(), element_count());
- elements_.Add(new_element);
- __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
- break;
- }
- case FrameElement::REGISTER:
- Use(original.reg(), element_count());
- // Fall through.
- case FrameElement::CONSTANT:
- case FrameElement::COPY:
- original.clear_sync();
- elements_.Add(original);
- break;
- case FrameElement::INVALID:
- UNREACHABLE();
- break;
- }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- // Store the value on top of the frame to the virtual frame slot at
- // a given index. The value on top of the frame is left in place.
- // This is a duplicating operation, so it can create copies.
- ASSERT(index >= 0);
- ASSERT(index < element_count());
-
- int top_index = element_count() - 1;
- FrameElement top = elements_[top_index];
- FrameElement original = elements_[index];
- if (top.is_copy() && top.index() == index) return;
- ASSERT(top.is_valid());
-
- InvalidateFrameSlotAt(index);
-
- // InvalidateFrameSlotAt can potentially change any frame element, due
- // to spilling registers to allocate temporaries in order to preserve
- // the copy-on-write semantics of aliased elements. Reload top from
- // the frame.
- top = elements_[top_index];
-
- if (top.is_copy()) {
- // There are two cases based on the relative positions of the
- // stored-to slot and the backing slot of the top element.
- int backing_index = top.index();
- ASSERT(backing_index != index);
- if (backing_index < index) {
- // 1. The top element is a copy of a slot below the stored-to
- // slot. The stored-to slot becomes an unsynced copy of that
- // same backing slot.
- elements_[index] = CopyElementAt(backing_index);
- } else {
- // 2. The top element is a copy of a slot above the stored-to
- // slot. The stored-to slot becomes the new (unsynced) backing
- // slot and both the top element and the element at the former
- // backing slot become copies of it. The sync state of the top
- // and former backing elements is preserved.
- FrameElement backing_element = elements_[backing_index];
- ASSERT(backing_element.is_memory() || backing_element.is_register());
- if (backing_element.is_memory()) {
- // Because sets of copies are canonicalized to be backed by
- // their lowest frame element, and because memory frame
- // elements are backed by the corresponding stack address, we
- // have to move the actual value down in the stack.
- //
- // TODO(209): consider allocating the stored-to slot to the
- // temp register. Alternatively, allow copies to appear in
- // any order in the frame and lazily move the value down to
- // the slot.
- __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else {
- set_register_location(backing_element.reg(), index);
- if (backing_element.is_synced()) {
- // If the element is a register, we will not actually move
- // anything on the stack but only update the virtual frame
- // element.
- backing_element.clear_sync();
- }
- }
- elements_[index] = backing_element;
-
- // The old backing element becomes a copy of the new backing
- // element.
- FrameElement new_element = CopyElementAt(index);
- elements_[backing_index] = new_element;
- if (backing_element.is_synced()) {
- elements_[backing_index].set_sync();
- }
-
- // All the copies of the old backing element (including the top
- // element) become copies of the new backing element.
- for (int i = backing_index + 1; i < element_count(); i++) {
- if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
- elements_[i].set_index(index);
- }
- }
- }
- return;
- }
-
- // Move the top element to the stored-to slot and replace it (the
- // top element) with a copy.
- elements_[index] = top;
- if (top.is_memory()) {
- // TODO(209): consider allocating the stored-to slot to the temp
- // register. Alternatively, allow copies to appear in any order
- // in the frame and lazily move the value down to the slot.
- FrameElement new_top = CopyElementAt(index);
- new_top.set_sync();
- elements_[top_index] = new_top;
-
- // The sync state of the former top element is correct (synced).
- // Emit code to move the value down in the frame.
- __ movq(kScratchRegister, Operand(rsp, 0));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else if (top.is_register()) {
- set_register_location(top.reg(), index);
- // The stored-to slot has the (unsynced) register reference and
- // the top element becomes a copy. The sync state of the top is
- // preserved.
- FrameElement new_top = CopyElementAt(index);
- if (top.is_synced()) {
- new_top.set_sync();
- elements_[index].clear_sync();
- }
- elements_[top_index] = new_top;
- } else {
- // The stored-to slot holds the same value as the top but
- // unsynced. (We do not have copies of constants yet.)
- ASSERT(top.is_constant());
- elements_[index].clear_sync();
- }
-}
-
-
-void VirtualFrame::MakeMergable() {
- for (int i = 0; i < element_count(); i++) {
- FrameElement element = elements_[i];
-
- // In all cases we have to reset the number type information
- // to unknown for a mergable frame because of incoming back edges.
- if (element.is_constant() || element.is_copy()) {
- if (element.is_synced()) {
- // Just spill.
- elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
- } else {
- // Allocate to a register.
- FrameElement backing_element; // Invalid if not a copy.
- if (element.is_copy()) {
- backing_element = elements_[element.index()];
- }
- Result fresh = cgen()->allocator()->Allocate();
- ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
- elements_[i] =
- FrameElement::RegisterElement(fresh.reg(),
- FrameElement::NOT_SYNCED,
- TypeInfo::Unknown());
- Use(fresh.reg(), i);
-
- // Emit a move.
- if (element.is_constant()) {
- __ Move(fresh.reg(), element.handle());
- } else {
- ASSERT(element.is_copy());
- // Copies are only backed by register or memory locations.
- if (backing_element.is_register()) {
- // The backing store may have been spilled by allocating,
- // but that's OK. If it was, the value is right where we
- // want it.
- if (!fresh.reg().is(backing_element.reg())) {
- __ movq(fresh.reg(), backing_element.reg());
- }
- } else {
- ASSERT(backing_element.is_memory());
- __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
- }
- }
- }
- // No need to set the copied flag --- there are no copies.
- } else {
- // Clear the copy flag of non-constant, non-copy elements.
- // They cannot be copied because copies are not allowed.
- // The copy flag is not relied on before the end of this loop,
- // including when registers are spilled.
- elements_[i].clear_copied();
- elements_[i].set_type_info(TypeInfo::Unknown());
- }
- }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm(), "[ Merge frame");
- // We should always be merging the code generator's current frame to an
- // expected frame.
- ASSERT(cgen()->frame() == this);
-
- // Adjust the stack pointer upward (toward the top of the virtual
- // frame) if necessary.
- if (stack_pointer_ < expected->stack_pointer_) {
- int difference = expected->stack_pointer_ - stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ subq(rsp, Immediate(difference * kPointerSize));
- }
-
- MergeMoveRegistersToMemory(expected);
- MergeMoveRegistersToRegisters(expected);
- MergeMoveMemoryToRegisters(expected);
-
- // Adjust the stack pointer downward if necessary.
- if (stack_pointer_ > expected->stack_pointer_) {
- int difference = stack_pointer_ - expected->stack_pointer_;
- stack_pointer_ = expected->stack_pointer_;
- __ addq(rsp, Immediate(difference * kPointerSize));
- }
-
- // At this point, the frames should be identical.
- ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- // Move registers, constants, and copies to memory. Perform moves
- // from the top downward in the frame in order to leave the backing
- // stores of copies in registers.
- for (int i = element_count() - 1; i >= 0; i--) {
- FrameElement target = expected->elements_[i];
- if (target.is_register()) continue; // Handle registers later.
- if (target.is_memory()) {
- FrameElement source = elements_[i];
- switch (source.type()) {
- case FrameElement::INVALID:
- // Not a legal merge move.
- UNREACHABLE();
- break;
-
- case FrameElement::MEMORY:
- // Already in place.
- break;
-
- case FrameElement::REGISTER:
- Unuse(source.reg());
- if (!source.is_synced()) {
- __ movq(Operand(rbp, fp_relative(i)), source.reg());
- }
- break;
-
- case FrameElement::CONSTANT:
- if (!source.is_synced()) {
- __ Move(Operand(rbp, fp_relative(i)), source.handle());
- }
- break;
-
- case FrameElement::COPY:
- if (!source.is_synced()) {
- int backing_index = source.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- __ movq(kScratchRegister,
- Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
- } else {
- ASSERT(backing_element.is_register());
- __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
- }
- }
- break;
- }
- }
- elements_[i] = target;
- }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
- // We have already done X-to-memory moves.
- ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- // Move the right value into register i if it is currently in a register.
- int index = expected->register_location(i);
- int use_index = register_location(i);
- // Skip if register i is unused in the target or else if source is
- // not a register (this is not a register-to-register move).
- if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
- Register target = RegisterAllocator::ToRegister(i);
- Register source = elements_[index].reg();
- if (index != use_index) {
- if (use_index == kIllegalIndex) { // Target is currently unused.
- // Copy the contents of the source register into the target register
- // and set the frame element's register to target.
- Use(target, index);
- Unuse(source);
- __ movq(target, source);
- } else {
- // Exchange contents of registers source and target.
- // Nothing except the register backing use_index has changed.
- elements_[use_index].set_reg(source);
- set_register_location(target, index);
- set_register_location(source, use_index);
- __ xchg(source, target);
- }
- }
-
- if (!elements_[index].is_synced() &&
- expected->elements_[index].is_synced()) {
- __ movq(Operand(rbp, fp_relative(index)), target);
- }
- elements_[index] = expected->elements_[index];
- }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
- // Move memory, constants, and copies to registers. This is the
- // final step and since it is not done from the bottom up, but in
- // register code order, we have special code to ensure that the backing
- // elements of copies are in their correct locations when we
- // encounter the copies.
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int index = expected->register_location(i);
- if (index != kIllegalIndex) {
- FrameElement source = elements_[index];
- FrameElement target = expected->elements_[index];
- Register target_reg = RegisterAllocator::ToRegister(i);
- ASSERT(target.reg().is(target_reg));
- switch (source.type()) {
- case FrameElement::INVALID: // Fall through.
- UNREACHABLE();
- break;
- case FrameElement::REGISTER:
- ASSERT(source.Equals(target));
- // Go to next iteration. Skips Use(target_reg) and syncing
- // below. It is safe to skip syncing because a target
- // register frame element would only be synced if all source
- // elements were.
- continue;
- break;
- case FrameElement::MEMORY:
- ASSERT(index <= stack_pointer_);
- __ movq(target_reg, Operand(rbp, fp_relative(index)));
- break;
-
- case FrameElement::CONSTANT:
- __ Move(target_reg, source.handle());
- break;
-
- case FrameElement::COPY: {
- int backing_index = source.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- ASSERT(backing_index <= stack_pointer_);
- // Optimization: if the backing store should also end up in a register,
- // move the backing store to its register first.
- if (expected->elements_[backing_index].is_register()) {
- FrameElement new_backing = expected->elements_[backing_index];
- Register new_backing_reg = new_backing.reg();
- ASSERT(!is_used(new_backing_reg));
- elements_[backing_index] = new_backing;
- Use(new_backing_reg, backing_index);
- __ movq(new_backing_reg,
- Operand(rbp, fp_relative(backing_index)));
- __ movq(target_reg, new_backing_reg);
- } else {
- __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
- }
- } else {
- __ movq(target_reg, backing.reg());
- }
- }
- }
- // Ensure the proper sync state.
- if (target.is_synced() && !source.is_synced()) {
- __ movq(Operand(rbp, fp_relative(index)), target_reg);
- }
- Use(target_reg, index);
- elements_[index] = target;
- }
- }
-}
-
-
-Result VirtualFrame::Pop() {
- FrameElement element = elements_.RemoveLast();
- int index = element_count();
- ASSERT(element.is_valid());
-
- // Get the number type information of the result.
- TypeInfo info;
- if (!element.is_copy()) {
- info = element.type_info();
- } else {
- info = elements_[element.index()].type_info();
- }
-
- bool pop_needed = (stack_pointer_ == index);
- if (pop_needed) {
- stack_pointer_--;
- if (element.is_memory()) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ pop(temp.reg());
- temp.set_type_info(info);
- return temp;
- }
-
- __ addq(rsp, Immediate(kPointerSize));
- }
- ASSERT(!element.is_memory());
-
- // The top element is a register, constant, or a copy. Unuse
- // registers and follow copies to their backing store.
- if (element.is_register()) {
- Unuse(element.reg());
- } else if (element.is_copy()) {
- ASSERT(element.index() < index);
- index = element.index();
- element = elements_[index];
- }
- ASSERT(!element.is_copy());
-
- // The element is memory, a register, or a constant.
- if (element.is_memory()) {
- // Memory elements could only be the backing store of a copy.
- // Allocate the original to a register.
- ASSERT(index <= stack_pointer_);
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- Use(temp.reg(), index);
- FrameElement new_element =
- FrameElement::RegisterElement(temp.reg(),
- FrameElement::SYNCED,
- element.type_info());
- // Preserve the copy flag on the element.
- if (element.is_copied()) new_element.set_copied();
- elements_[index] = new_element;
- __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
- return Result(temp.reg(), info);
- } else if (element.is_register()) {
- return Result(element.reg(), info);
- } else {
- ASSERT(element.is_constant());
- return Result(element.handle());
- }
-}
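Pop reads through copies: if the popped element is a copy, both its value and its type information come from the backing element lower in the frame. A small hedged sketch with toy data structures (not the V8 FrameElement type) of that read-through:

// pop_copy_sketch.cc -- hypothetical illustration only, not V8 code.
#include <cassert>
#include <vector>

struct ToyFrameElement {
  bool is_copy;
  int backing_index;  // valid only when is_copy is true
  int value;          // stands in for register/constant/memory contents
};

int main() {
  // Slot 0 holds the value 42; slot 1 is a copy of slot 0.
  std::vector<ToyFrameElement> elements = {
      {false, -1, 42},
      {true, 0, 0},
  };

  // Pop the top element; if it is a copy, read through to its backing store.
  ToyFrameElement top = elements.back();
  elements.pop_back();
  int popped = top.is_copy ? elements[top.backing_index].value : top.value;
  assert(popped == 42);
  return 0;
}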
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->ToRegister(rax);
- arg->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
-
- if (arg0->is_register() && arg0->reg().is(rax)) {
- if (arg1->is_register() && arg1->reg().is(rdx)) {
- // Wrong registers.
- __ xchg(rax, rdx);
- } else {
- // Register rdx is free for arg0, which frees rax for arg1.
- arg0->ToRegister(rdx);
- arg1->ToRegister(rax);
- }
- } else {
- // Register rax is free for arg1, which guarantees rdx is free for
- // arg0.
- arg1->ToRegister(rax);
- arg0->ToRegister(rdx);
- }
-
- arg0->Unuse();
- arg1->Unuse();
- return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
- Result function = Pop();
-
- // InvokeFunction requires function in rdi. Move it in there.
- function.ToRegister(rdi);
- function.Unuse();
-
- // +1 for receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- ASSERT(cgen()->HasValidEntryRegisters());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION);
- RestoreContextRegister();
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- // Emit code to write elements below the stack pointer to their
- // (already allocated) stack address.
- ASSERT(index <= stack_pointer_);
- FrameElement element = elements_[index];
- ASSERT(!element.is_synced());
- switch (element.type()) {
- case FrameElement::INVALID:
- break;
-
- case FrameElement::MEMORY:
- // This function should not be called with synced elements.
- // (memory elements are always synced).
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ movq(Operand(rbp, fp_relative(index)), element.reg());
- break;
-
- case FrameElement::CONSTANT:
- __ Move(Operand(rbp, fp_relative(index)), element.handle());
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing_element = elements_[backing_index];
- if (backing_element.is_memory()) {
- __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
- __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
- } else {
- ASSERT(backing_element.is_register());
- __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- // Sync an element of the frame that is just above the stack pointer
- // by pushing it.
- ASSERT(index == stack_pointer_ + 1);
- stack_pointer_++;
- FrameElement element = elements_[index];
-
- switch (element.type()) {
- case FrameElement::INVALID:
- __ Push(Smi::FromInt(0));
- break;
-
- case FrameElement::MEMORY:
- // No memory elements exist above the stack pointer.
- UNREACHABLE();
- break;
-
- case FrameElement::REGISTER:
- __ push(element.reg());
- break;
-
- case FrameElement::CONSTANT:
- __ Move(kScratchRegister, element.handle());
- __ push(kScratchRegister);
- break;
-
- case FrameElement::COPY: {
- int backing_index = element.index();
- FrameElement backing = elements_[backing_index];
- ASSERT(backing.is_memory() || backing.is_register());
- if (backing.is_memory()) {
- __ push(Operand(rbp, fp_relative(backing_index)));
- } else {
- __ push(backing.reg());
- }
- break;
- }
- }
- elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
- // [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
- ASSERT(begin >= 0);
- ASSERT(end < element_count());
- // Sync elements below the range if they have not been materialized
- // on the stack.
- int start = Min(begin, stack_pointer_ + 1);
- int end_or_stack_pointer = Min(stack_pointer_, end);
- // Emit normal push instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
- int i = start;
-
- while (i <= end_or_stack_pointer) {
- if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
- i++;
- }
- while (i <= end) {
- SyncElementByPushing(i);
- i++;
- }
-}
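SyncRange splits the range at the stack pointer: slots at or below stack_pointer_ already have physical stack space and are written with rbp-relative moves, while slots above it are materialized with pushes, each of which also advances the stack pointer. A hedged standalone sketch (toy output only, not V8 code) of that split:

// sync_range_sketch.cc -- hypothetical illustration, not part of V8.
// Prints the instruction shape SyncRange would choose for each slot index.
#include <cstdio>

int main() {
  int stack_pointer = 3;            // highest index already on the real stack
  const int begin = 2, end = 6;     // hypothetical range to sync

  for (int i = begin; i <= end; ++i) {
    if (i <= stack_pointer) {
      // Slot already has stack space: write it in place.
      std::printf("movq [rbp + fp_relative(%d)], <element %d>\n", i, i);
    } else {
      // Slot lies above rsp: pushing both writes and allocates it.
      std::printf("push <element %d>\n", i);
      ++stack_pointer;              // the push extends the physical stack
    }
  }
  return 0;
}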
-
-
-//------------------------------------------------------------------------------
-// Virtual frame stub and IC calling functions.
-
-Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- PrepareForCall(0, 0);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ DebugBreak();
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b. Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg) {
- ASSERT(!a_reg.is(b_reg));
- // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
- ASSERT(cgen()->allocator()->count(a_reg) <= 2);
- ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
- ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
- ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
- (a->is_register() && a->reg().is(a_reg)) ||
- (b->is_register() && b->reg().is(a_reg)));
- // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
- ASSERT(cgen()->allocator()->count(b_reg) <= 2);
- ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
- ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
- ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
- (a->is_register() && a->reg().is(b_reg)) ||
- (b->is_register() && b->reg().is(b_reg)));
-
- if (a->is_register() && a->reg().is(a_reg)) {
- b->ToRegister(b_reg);
- } else if (!cgen()->allocator()->is_used(a_reg)) {
- a->ToRegister(a_reg);
- b->ToRegister(b_reg);
- } else if (cgen()->allocator()->is_used(b_reg)) {
- // a must be in b_reg, b in a_reg.
- __ xchg(a_reg, b_reg);
- // Results a and b will be invalidated, so it is ok if they are switched.
- } else {
- b->ToRegister(b_reg);
- a->ToRegister(a_reg);
- }
- a->Unuse();
- b->Unuse();
-}
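The case analysis in MoveResultsToRegisters boils down to: if a is already home, only b moves; if a_reg is free, fill it first; if both target registers hold the other value, one exchange fixes both; otherwise evacuate a_reg by placing b first. A hedged standalone sketch of that logic, with locations modeled as strings (hypothetical helper, not the V8 API):

// two_register_shuffle.cc -- hypothetical standalone sketch, not V8 code.
// Mirrors the case analysis in MoveResultsToRegisters: place value A in a_reg
// and value B in b_reg without ever overwriting a still-needed value.
#include <cstdio>
#include <string>

void PlaceInRegisters(std::string a_loc, std::string b_loc,
                      const std::string& a_reg, const std::string& b_reg) {
  if (a_loc == a_reg) {
    // A is already home; only B has to move.
    std::printf("movq %s, %s\n", b_reg.c_str(), b_loc.c_str());
  } else if (b_loc != a_reg) {
    // a_reg is free (only B could occupy it): fill it first, then place B.
    std::printf("movq %s, %s\n", a_reg.c_str(), a_loc.c_str());
    std::printf("movq %s, %s\n", b_reg.c_str(), b_loc.c_str());
  } else if (a_loc == b_reg) {
    // A sits in b_reg and B sits in a_reg: one exchange fixes both.
    std::printf("xchg %s, %s\n", a_reg.c_str(), b_reg.c_str());
  } else {
    // B occupies a_reg but b_reg is free: evacuate B first, then place A.
    std::printf("movq %s, %s\n", b_reg.c_str(), b_loc.c_str());
    std::printf("movq %s, %s\n", a_reg.c_str(), a_loc.c_str());
  }
}

int main() {
  // A already sits in rcx; B is in a memory slot.
  PlaceInRegisters("rcx", "[rbp-16]", "rcx", "rax");
  // The swapped case: A is in rax, B is in rcx.
  PlaceInRegisters("rax", "rcx", "rcx", "rax");
  return 0;
}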
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. Both are dropped.
- // The IC expects name in rcx and receiver in rax.
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- Result name = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&name, &receiver, rcx, rax);
-
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. Put them in rax and rdx.
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&key, &receiver, rax, rdx);
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
- bool is_contextual,
- StrictModeFlag strict_mode) {
- // Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
- Result value = Pop();
- RelocInfo::Mode mode;
- if (is_contextual) {
- PrepareForCall(0, 0);
- value.ToRegister(rax);
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- value.Unuse();
- mode = RelocInfo::CODE_TARGET_CONTEXT;
- } else {
- Result receiver = Pop();
- PrepareForCall(0, 0);
- MoveResultsToRegisters(&value, &receiver, rax, rdx);
- mode = RelocInfo::CODE_TARGET;
- }
- __ Move(rcx, name);
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
- // Value, key, and receiver are on the top of the frame. The IC
- // expects value in rax, key in rcx, and receiver in rdx.
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- PrepareForCall(0, 0);
- if (!cgen()->allocator()->is_used(rax) ||
- (value.is_register() && value.reg().is(rax))) {
- if (!cgen()->allocator()->is_used(rax)) {
- value.ToRegister(rax);
- }
- MoveResultsToRegisters(&key, &receiver, rcx, rdx);
- value.Unuse();
- } else if (!cgen()->allocator()->is_used(rcx) ||
- (key.is_register() && key.reg().is(rcx))) {
- if (!cgen()->allocator()->is_used(rcx)) {
- key.ToRegister(rcx);
- }
- MoveResultsToRegisters(&value, &receiver, rax, rdx);
- key.Unuse();
- } else if (!cgen()->allocator()->is_used(rdx) ||
- (receiver.is_register() && receiver.reg().is(rdx))) {
- if (!cgen()->allocator()->is_used(rdx)) {
- receiver.ToRegister(rdx);
- }
- MoveResultsToRegisters(&key, &value, rcx, rax);
- receiver.Unuse();
- } else {
- // All three registers are used, and no value is in the correct place.
- // We have one of the two circular permutations of rax, rcx, rdx.
- ASSERT(value.is_register());
- if (value.reg().is(rcx)) {
- __ xchg(rax, rdx);
- __ xchg(rax, rcx);
- } else {
- __ xchg(rax, rcx);
- __ xchg(rax, rdx);
- }
- value.Unuse();
- key.Unuse();
- receiver.Unuse();
- }
-
- Handle<Code> ic(Builtins::builtin(
- (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
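The final branch above untangles a circular permutation of rax, rcx, and rdx with just two exchanges. A small hedged sketch (plain C++, registers modeled as integers) checking that both xchg sequences rotate the three values into place:

// three_cycle_sketch.cc -- toy illustration, not V8 code.
// Models xchg on integer "registers" to show that two exchanges undo a
// circular permutation of (rax, rcx, rdx), as in CallKeyedStoreIC.
#include <cassert>
#include <utility>

int main() {
  // Desired final contents: value in rax, key in rcx, receiver in rdx.
  const int value = 1, key = 2, receiver = 3;

  // Permutation handled by the first branch: value is in rcx,
  // key in rdx, receiver in rax.
  int rax = receiver, rcx = value, rdx = key;
  std::swap(rax, rdx);  // __ xchg(rax, rdx)
  std::swap(rax, rcx);  // __ xchg(rax, rcx)
  assert(rax == value && rcx == key && rdx == receiver);

  // The other circular permutation: value in rdx, key in rax, receiver in rcx.
  rax = key; rcx = receiver; rdx = value;
  std::swap(rax, rcx);  // __ xchg(rax, rcx)
  std::swap(rax, rdx);  // __ xchg(rax, rdx)
  assert(rax == value && rcx == key && rdx == receiver);
  return 0;
}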
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are found on top of the frame
- // and dropped by the call. The IC expects the name in rcx and the rest
- // on the stack, and drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
- Result name = Pop();
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- name.ToRegister(rcx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
- int arg_count,
- int loop_nesting) {
- // Function name, arguments, and receiver are found on top of the frame
- // and dropped by the call. The IC expects the name in rcx and the rest
- // on the stack, and drops them all.
- InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> ic =
- StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
- Result name = Pop();
- // Spill args, receiver, and function. The call will drop args and
- // receiver.
- PrepareForCall(arg_count + 1, arg_count + 1);
- name.ToRegister(rcx);
- name.Unuse();
- return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
- // Arguments, receiver, and function are on top of the frame. The
- // IC expects arg count in rax, function in rdi, and the arguments
- // and receiver on the stack.
- Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- // Duplicate the function before preparing the frame.
- PushElementAt(arg_count);
- Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
- function.ToRegister(rdi);
-
- // Constructors are called with the number of arguments in register
- // rax for now. Another option would be to have separate construct
- // call trampolines per different arguments counts encountered.
- Result num_args = cgen()->allocator()->Allocate(rax);
- ASSERT(num_args.is_valid());
- __ Set(num_args.reg(), arg_count);
-
- function.Unuse();
- num_args.Unuse();
- return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- ASSERT(cgen()->HasValidEntryRegisters());
- // Grow the expression stack by handler size less one (the return
- // address is already pushed by a call instruction).
- Adjust(kHandlerSize - 1);
- __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
deleted file mode 100644
index 7396db17f6..0000000000
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ /dev/null
@@ -1,593 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
-#define V8_X64_VIRTUAL_FRAME_X64_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
- ASSERT(cgen()->has_valid_frame());
- cgen()->frame()->SpillAll();
- cgen()->set_in_spilled_code(true);
- }
-
- ~SpilledScope() {
- cgen()->set_in_spilled_code(previous_state_);
- }
-
- private:
- bool previous_state_;
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- TypeInfo info = TypeInfo::Uninitialized());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- inline int register_location(Register reg);
-
- inline void set_register_location(Register reg, int index);
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- inline bool is_used(Register reg);
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget count elements from the top of the frame, all of them in memory
- // (including synced ones), and adjust the stack pointer downward to
- // match an external frame effect (examples include a call removing
- // its arguments, and exiting a try/catch removing an exception
- // handler). No code will be emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- ForgetElements(count);
- }
-
- // Forget count elements from the top of the frame without adjusting
- // the stack pointer downward. This is used, for example, before
- // merging frames at break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- inline void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Spill the top element of the frame to memory.
- void SpillTop() { SpillElementAt(element_count() - 1); }
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Put this frame into a state where an arbitrary frame of the same
- // height can be merged to it. Copies and constants are removed from the frame.
- void MakeMergable();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Use(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- inline void PrepareForReturn();
-
- // Number of local variables above which we use a loop for allocating locals.
- static const int kLocalVarBound = 14;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // An element of the expression stack as an assembly operand.
- Operand ElementAt(int index) const {
- return Operand(rsp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- inline void SetElementAt(int index, Handle<Object> value);
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- void StoreToElementAt(int index) {
- StoreToFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return Operand(rbp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
-
- // Save the value of the rsi register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the rsi register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- Operand ParameterAt(int index) {
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count());
- return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a parameter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- Operand Receiver() { return ParameterAt(-1); }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline Result CallStub(CodeStub* stub, int arg_count);
-
- // Call stub that takes a single argument passed in rax. The
- // argument is given as a result which does not have to be rax or
- // even a register. The argument is consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that takes a pair of arguments passed in rdx (arg0) and
- // rax (arg1). The arguments are given as results which do not have
- // to be in the proper registers or even in registers. The
- // arguments are consumed by the call.
- Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- Result CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- Result CallRuntime(Runtime::Function* f, int arg_count);
- Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- Result InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count);
-
- // Call load IC. Name and receiver are found on top of the frame.
- // Both are dropped.
- Result CallLoadIC(RelocInfo::Mode mode);
-
- // Call keyed load IC. Key and receiver are found on top of the
- // frame. Both are dropped.
- Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual,
- StrictModeFlag strict_mode);
-
- // Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. All three are dropped.
- Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
- // Call call IC. Function name, arguments, and receiver are found on top
- // of the frame and dropped by the call.
- // The argument count does not include the receiver.
- Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Call keyed call IC. Same calling convention as CallCallIC.
- Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
- // Allocate and call JS function as constructor. Arguments,
- // receiver (global object), and function are found on top of the
- // frame. Function is not dropped. The argument count does not
- // include the receiver.
- Result CallConstructor(int arg_count);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
-
- // Duplicate the n'th element from the top of the frame.
- // Dup(1) is equivalent to Dup().
- void Dup(int n) {
- ASSERT(n > 0);
- PushFrameSlotAt(element_count() - n);
- }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- void EmitPop(const Operand& operand);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(const Operand& operand,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Heap::RootListIndex index,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Immediate immediate,
- TypeInfo info = TypeInfo::Unknown());
- void EmitPush(Smi* value);
- // Uses kScratchRegister, emits appropriate relocation info.
- void EmitPush(Handle<Object> value);
-
- inline bool ConstantPoolOverflowed();
-
- // Push an element on the virtual frame.
- void Push(Handle<Object> value);
- inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Smi* value);
-
- // Pushing a result invalidates it (its contents become owned by the
- // frame).
- void Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg(), result->type_info());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
- }
-
- // Pushing an expression expects that the expression is trivial (according
- // to Expression::IsTrivial).
- void Push(Expression* expr);
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- inline void SetTypeForLocalAt(int index, TypeInfo info);
- inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the rsp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
- inline int local_count();
-
- // The index of the element that is at the processor's frame pointer
- // (the rbp register). The parameters, receiver, and return address
- // are below the frame pointer.
- int frame_pointer() { return parameter_count() + 2; }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // above the frame pointer.
- int context_index() { return frame_pointer() + 1; }
-
- // The index of the function slot in the frame. It is above the frame
- // pointer and the context slot.
- int function_index() { return frame_pointer() + 2; }
-
- // The index of the first local. Between the frame pointer and the
- // locals lie the context and the function.
- int local0_index() { return frame_pointer() + 3; }
-
- // The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // This function assumes that a and b are the only results that could be in
- // the registers a_reg or b_reg. Other results can be live, but must not
- // be in the registers a_reg or b_reg. The results a and b are invalidated.
- void MoveResultsToRegisters(Result* a,
- Result* b,
- Register a_reg,
- Register b_reg);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- Result RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class FrameRegisterState;
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_VIRTUAL_FRAME_X64_H_
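For reference, a hedged standalone sketch that evaluates the index helpers declared in the private section above with made-up parameter and local counts, and prints the rbp-relative offset that fp_relative assigns to each slot:

// frame_layout_sketch.cc -- toy reproduction of the index arithmetic above;
// not V8 code, and the counts below are made up for illustration.
#include <cstdio>

const int kPointerSize = 8;  // x64

int main() {
  const int parameter_count = 2;   // hypothetical
  const int local_count = 3;       // hypothetical

  // Index helpers as declared in VirtualFrame (x64):
  const int frame_pointer = parameter_count + 2;  // params, receiver, return address below
  const int param0_index = 1;                     // the receiver sits at index 0
  const int context_index = frame_pointer + 1;
  const int function_index = frame_pointer + 2;
  const int local0_index = frame_pointer + 3;
  const int expression_base_index = local0_index + local_count;

  // fp_relative(index) gives the rbp-relative byte offset of a slot.
  auto fp_relative = [&](int index) {
    return (frame_pointer - index) * kPointerSize;
  };

  std::printf("receiver        : index %d, [rbp%+d]\n", 0, fp_relative(0));
  std::printf("first parameter : index %d, [rbp%+d]\n", param0_index, fp_relative(param0_index));
  std::printf("frame pointer   : index %d, [rbp%+d]\n", frame_pointer, fp_relative(frame_pointer));
  std::printf("context slot    : index %d, [rbp%+d]\n", context_index, fp_relative(context_index));
  std::printf("function slot   : index %d, [rbp%+d]\n", function_index, fp_relative(function_index));
  std::printf("first local     : index %d, [rbp%+d]\n", local0_index, fp_relative(local0_index));
  std::printf("expression base : index %d\n", expression_base_index);
  return 0;
}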