| author    | Ryan Dahl <ry@tinyclouds.org> | 2011-07-05 14:40:13 -0700 |
|-----------|-------------------------------|---------------------------|
| committer | Ryan Dahl <ry@tinyclouds.org> | 2011-07-05 14:51:29 -0700 |
| commit    | 149562555c9bf56457dee9a1ad70c53ed670a776 (patch) | |
| tree      | f6217cf3c54ddbee03f37247a3c7c75203f868fd | /deps/v8/src/arm |
| parent    | f08720606757577d95bd09b48697c7decbf17f00 (diff) | |
| download  | node-new-149562555c9bf56457dee9a1ad70c53ed670a776.tar.gz | |
Downgrade V8 to 3.1.8.25
There are serious performance regressions both in V8 and in our own legacy
networking stack. Until we correct our own problems, we are going back to the
old V8.
Diffstat (limited to 'deps/v8/src/arm')
36 files changed, 16032 insertions, 8033 deletions
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 3e19a45385..3b811021b3 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -203,12 +203,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(target_reference_address()); #ifdef ENABLE_DEBUGGER_SUPPORT - // TODO(isolates): Get a cached isolate below. - } else if (((RelocInfo::IsJSReturn(mode) && + } else if (Debug::has_break_points() && + ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && - IsPatchedDebugBreakSlotSequence())) && - Isolate::Current()->debug()->has_break_points()) { + IsPatchedDebugBreakSlotSequence()))) { visitor->VisitDebugTarget(this); #endif } else if (mode == RelocInfo::RUNTIME_ENTRY) { @@ -218,23 +217,23 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { template<typename StaticVisitor> -void RelocInfo::Visit(Heap* heap) { +void RelocInfo::Visit() { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitPointer(target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { - StaticVisitor::VisitCodeTarget(heap, this); + StaticVisitor::VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { - StaticVisitor::VisitGlobalPropertyCell(heap, this); + StaticVisitor::VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(target_reference_address()); #ifdef ENABLE_DEBUGGER_SUPPORT - } else if (heap->isolate()->debug()->has_break_points() && + } else if (Debug::has_break_points() && ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(mode) && IsPatchedDebugBreakSlotSequence()))) { - StaticVisitor::VisitDebugTarget(heap, this); + StaticVisitor::VisitDebugTarget(this); #endif } else if (mode == RelocInfo::RUNTIME_ENTRY) { StaticVisitor::VisitRuntimeEntry(this); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index efa252dbac..c91d4ba2bc 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -32,7 +32,7 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. #include "v8.h" @@ -44,80 +44,62 @@ namespace v8 { namespace internal { -#ifdef DEBUG -bool CpuFeatures::initialized_ = false; -#endif +// Safe default is no features. unsigned CpuFeatures::supported_ = 0; +unsigned CpuFeatures::enabled_ = 0; unsigned CpuFeatures::found_by_runtime_probing_ = 0; -// Get the CPU features enabled by the build. For cross compilation the -// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS -// can be defined to enable ARMv7 and VFPv3 instructions when building the -// snapshot. +#ifdef __arm__ static uint64_t CpuFeaturesImpliedByCompiler() { uint64_t answer = 0; #ifdef CAN_USE_ARMV7_INSTRUCTIONS answer |= 1u << ARMv7; #endif // def CAN_USE_ARMV7_INSTRUCTIONS -#ifdef CAN_USE_VFP_INSTRUCTIONS - answer |= 1u << VFP3 | 1u << ARMv7; -#endif // def CAN_USE_VFP_INSTRUCTIONS - -#ifdef __arm__ // If the compiler is allowed to use VFP then we can use VFP too in our code // generation even when generating snapshots. 
This won't work for cross - // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. + // compilation. #if defined(__VFP_FP__) && !defined(__SOFTFP__) - answer |= 1u << VFP3 | 1u << ARMv7; + answer |= 1u << VFP3; #endif // defined(__VFP_FP__) && !defined(__SOFTFP__) -#endif // def __arm__ - +#ifdef CAN_USE_VFP_INSTRUCTIONS + answer |= 1u << VFP3; +#endif // def CAN_USE_VFP_INSTRUCTIONS return answer; } +#endif // def __arm__ -void CpuFeatures::Probe() { - ASSERT(!initialized_); -#ifdef DEBUG - initialized_ = true; -#endif - - // Get the features implied by the OS and the compiler settings. This is the - // minimal set of features which is also alowed for generated code in the - // snapshot. - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - supported_ |= CpuFeaturesImpliedByCompiler(); - - if (Serializer::enabled()) { - // No probing for features if we might serialize (generate snapshot). - return; - } - +void CpuFeatures::Probe(bool portable) { #ifndef __arm__ - // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is - // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. + // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled. if (FLAG_enable_vfp3) { - supported_ |= 1u << VFP3 | 1u << ARMv7; + supported_ |= 1u << VFP3; } // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled if (FLAG_enable_armv7) { supported_ |= 1u << ARMv7; } #else // def __arm__ - // Probe for additional features not already known to be available. - if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) { - // This implementation also sets the VFP flags if runtime - // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI - // 0406B, page A1-6. - supported_ |= 1u << VFP3 | 1u << ARMv7; - found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7; + if (portable && Serializer::enabled()) { + supported_ |= OS::CpuFeaturesImpliedByPlatform(); + supported_ |= CpuFeaturesImpliedByCompiler(); + return; // No features if we might serialize. } - if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) { + if (OS::ArmCpuHasFeature(VFP3)) { + // This implementation also sets the VFP flags if + // runtime detection of VFP returns true. + supported_ |= 1u << VFP3; + found_by_runtime_probing_ |= 1u << VFP3; + } + + if (OS::ArmCpuHasFeature(ARMv7)) { supported_ |= 1u << ARMv7; found_by_runtime_probing_ |= 1u << ARMv7; } + + if (!portable) found_by_runtime_probing_ = 0; #endif } @@ -166,7 +148,7 @@ Operand::Operand(Handle<Object> handle) { rm_ = no_reg; // Verify all Objects referred by code are NOT in new space. Object* obj = *handle; - ASSERT(!HEAP->InNewSpace(obj)); + ASSERT(!Heap::InNewSpace(obj)); if (obj->IsHeapObject()) { imm32_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; @@ -284,20 +266,21 @@ const Instr kLdrStrOffsetMask = 0x00000fff; // Spare buffer. static const int kMinimalBufferSize = 4*KB; +static byte* spare_buffer_ = NULL; -Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) - : AssemblerBase(arg_isolate), - positions_recorder_(this), - emit_debug_code_(FLAG_debug_code) { +Assembler::Assembler(void* buffer, int buffer_size) + : positions_recorder_(this), + allow_peephole_optimization_(false) { + allow_peephole_optimization_ = FLAG_peephole_optimization; if (buffer == NULL) { // Do our own buffer management. 
if (buffer_size <= kMinimalBufferSize) { buffer_size = kMinimalBufferSize; - if (isolate()->assembler_spare_buffer() != NULL) { - buffer = isolate()->assembler_spare_buffer(); - isolate()->set_assembler_spare_buffer(NULL); + if (spare_buffer_ != NULL) { + buffer = spare_buffer_; + spare_buffer_ = NULL; } } if (buffer == NULL) { @@ -320,22 +303,20 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) ASSERT(buffer_ != NULL); pc_ = buffer_; reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); - num_pending_reloc_info_ = 0; + num_prinfo_ = 0; next_buffer_check_ = 0; const_pool_blocked_nesting_ = 0; no_const_pool_before_ = 0; - first_const_pool_use_ = -1; + last_const_pool_end_ = 0; last_bound_pos_ = 0; - ast_id_for_reloc_info_ = kNoASTId; } Assembler::~Assembler() { ASSERT(const_pool_blocked_nesting_ == 0); if (own_buffer_) { - if (isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); + if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { + spare_buffer_ = buffer_; } else { DeleteArray(buffer_); } @@ -346,7 +327,7 @@ Assembler::~Assembler() { void Assembler::GetCode(CodeDesc* desc) { // Emit constant pool if necessary. CheckConstPool(true, false); - ASSERT(num_pending_reloc_info_ == 0); + ASSERT(num_prinfo_ == 0); // Setup code descriptor. desc->buffer = buffer_; @@ -786,36 +767,11 @@ bool Operand::must_use_constant_pool() const { } -bool Operand::is_single_instruction(Instr instr) const { +bool Operand::is_single_instruction() const { if (rm_.is_valid()) return true; + if (must_use_constant_pool()) return false; uint32_t dummy1, dummy2; - if (must_use_constant_pool() || - !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { - // The immediate operand cannot be encoded as a shifter operand, or use of - // constant pool is required. For a mov instruction not setting the - // condition code additional instruction conventions can be used. - if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - if (must_use_constant_pool() || - !CpuFeatures::IsSupported(ARMv7)) { - // mov instruction will be an ldr from constant pool (one instruction). - return true; - } else { - // mov instruction will be a mov or movw followed by movt (two - // instructions). - return false; - } - } else { - // If this is not a mov or mvn instruction there will always an additional - // instructions - either mov or ldr. The mov might actually be two - // instructions mov or movw followed by movt so including the actual - // instruction two or three instructions will be generated. - return false; - } - } else { - // No use of constant pool and the immediate operand can be encoded as a - // shifter operand. - return true; - } + return fits_shifter(imm32_, &dummy1, &dummy2, NULL); } @@ -838,8 +794,7 @@ void Assembler::addrmod1(Instr instr, CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed Condition cond = Instruction::ConditionField(instr); if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - if (x.must_use_constant_pool() || - !CpuFeatures::IsSupported(ARMv7)) { + if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { RecordRelocInfo(x.rmode_, x.imm32_); ldr(rd, MemOperand(pc, 0), cond); } else { @@ -873,7 +828,7 @@ void Assembler::addrmod1(Instr instr, emit(instr | rn.code()*B16 | rd.code()*B12); if (rn.is(pc) || x.rm_.is(pc)) { // Block constant pool emission for one instruction after reading pc. 
- BlockConstPoolFor(1); + BlockConstPoolBefore(pc_offset() + kInstrSize); } } @@ -997,7 +952,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { // Block the emission of the constant pool, since the branch instruction must // be emitted at the pc offset recorded by the label. - BlockConstPoolFor(1); + BlockConstPoolBefore(pc_offset() + kInstrSize); return target_pos - (pc_offset() + kPcLoadDelta); } @@ -1094,6 +1049,20 @@ void Assembler::rsb(Register dst, Register src1, const Operand& src2, void Assembler::add(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { addrmod1(cond | ADD | s, src1, dst, src2); + + // Eliminate pattern: push(r), pop() + // str(src, MemOperand(sp, 4, NegPreIndex), al); + // add(sp, sp, Operand(kPointerSize)); + // Both instructions can be eliminated. + if (can_peephole_optimize(2) && + // Pattern. + instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && + (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) { + pc_ -= 2 * kInstrSize; + if (FLAG_print_peephole_optimization) { + PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); + } + } } @@ -1398,11 +1367,195 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { positions_recorder()->WriteRecordedPositions(); } addrmod2(cond | B26 | L, dst, src); + + // Eliminate pattern: push(ry), pop(rx) + // str(ry, MemOperand(sp, 4, NegPreIndex), al) + // ldr(rx, MemOperand(sp, 4, PostIndex), al) + // Both instructions can be eliminated if ry = rx. + // If ry != rx, a register copy from ry to rx is inserted + // after eliminating the push and the pop instructions. + if (can_peephole_optimize(2)) { + Instr push_instr = instr_at(pc_ - 2 * kInstrSize); + Instr pop_instr = instr_at(pc_ - 1 * kInstrSize); + + if (IsPush(push_instr) && IsPop(pop_instr)) { + if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) { + // For consecutive push and pop on different registers, + // we delete both the push & pop and insert a register move. + // push ry, pop rx --> mov rx, ry + Register reg_pushed, reg_popped; + reg_pushed = GetRd(push_instr); + reg_popped = GetRd(pop_instr); + pc_ -= 2 * kInstrSize; + // Insert a mov instruction, which is better than a pair of push & pop + mov(reg_popped, reg_pushed); + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop (diff reg) replaced by a reg move\n", + pc_offset()); + } + } else { + // For consecutive push and pop on the same register, + // both the push and the pop can be deleted. + pc_ -= 2 * kInstrSize; + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); + } + } + } + } + + if (can_peephole_optimize(2)) { + Instr str_instr = instr_at(pc_ - 2 * kInstrSize); + Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize); + + if ((IsStrRegFpOffset(str_instr) && + IsLdrRegFpOffset(ldr_instr)) || + (IsStrRegFpNegOffset(str_instr) && + IsLdrRegFpNegOffset(ldr_instr))) { + if ((ldr_instr & kLdrStrInstrArgumentMask) == + (str_instr & kLdrStrInstrArgumentMask)) { + // Pattern: Ldr/str same fp+offset, same register. + // + // The following: + // str rx, [fp, #-12] + // ldr rx, [fp, #-12] + // + // Becomes: + // str rx, [fp, #-12] + + pc_ -= 1 * kInstrSize; + if (FLAG_print_peephole_optimization) { + PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset()); + } + } else if ((ldr_instr & kLdrStrOffsetMask) == + (str_instr & kLdrStrOffsetMask)) { + // Pattern: Ldr/str same fp+offset, different register. 
+ // + // The following: + // str rx, [fp, #-12] + // ldr ry, [fp, #-12] + // + // Becomes: + // str rx, [fp, #-12] + // mov ry, rx + + Register reg_stored, reg_loaded; + reg_stored = GetRd(str_instr); + reg_loaded = GetRd(ldr_instr); + pc_ -= 1 * kInstrSize; + // Insert a mov instruction, which is better than ldr. + mov(reg_loaded, reg_stored); + if (FLAG_print_peephole_optimization) { + PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset()); + } + } + } + } + + if (can_peephole_optimize(3)) { + Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize); + Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize); + Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize); + if (IsPush(mem_write_instr) && + IsPop(mem_read_instr)) { + if ((IsLdrRegFpOffset(ldr_instr) || + IsLdrRegFpNegOffset(ldr_instr))) { + if (Instruction::RdValue(mem_write_instr) == + Instruction::RdValue(mem_read_instr)) { + // Pattern: push & pop from/to same register, + // with a fp+offset ldr in between + // + // The following: + // str rx, [sp, #-4]! + // ldr rz, [fp, #-24] + // ldr rx, [sp], #+4 + // + // Becomes: + // if(rx == rz) + // delete all + // else + // ldr rz, [fp, #-24] + + if (Instruction::RdValue(mem_write_instr) == + Instruction::RdValue(ldr_instr)) { + pc_ -= 3 * kInstrSize; + } else { + pc_ -= 3 * kInstrSize; + // Reinsert back the ldr rz. + emit(ldr_instr); + } + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); + } + } else { + // Pattern: push & pop from/to different registers + // with a fp+offset ldr in between + // + // The following: + // str rx, [sp, #-4]! + // ldr rz, [fp, #-24] + // ldr ry, [sp], #+4 + // + // Becomes: + // if(ry == rz) + // mov ry, rx; + // else if(rx != rz) + // ldr rz, [fp, #-24] + // mov ry, rx + // else if((ry != rz) || (rx == rz)) becomes: + // mov ry, rx + // ldr rz, [fp, #-24] + + Register reg_pushed, reg_popped; + if (Instruction::RdValue(mem_read_instr) == + Instruction::RdValue(ldr_instr)) { + reg_pushed = GetRd(mem_write_instr); + reg_popped = GetRd(mem_read_instr); + pc_ -= 3 * kInstrSize; + mov(reg_popped, reg_pushed); + } else if (Instruction::RdValue(mem_write_instr) != + Instruction::RdValue(ldr_instr)) { + reg_pushed = GetRd(mem_write_instr); + reg_popped = GetRd(mem_read_instr); + pc_ -= 3 * kInstrSize; + emit(ldr_instr); + mov(reg_popped, reg_pushed); + } else if ((Instruction::RdValue(mem_read_instr) != + Instruction::RdValue(ldr_instr)) || + (Instruction::RdValue(mem_write_instr) == + Instruction::RdValue(ldr_instr))) { + reg_pushed = GetRd(mem_write_instr); + reg_popped = GetRd(mem_read_instr); + pc_ -= 3 * kInstrSize; + mov(reg_popped, reg_pushed); + emit(ldr_instr); + } + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); + } + } + } + } + } } void Assembler::str(Register src, const MemOperand& dst, Condition cond) { addrmod2(cond | B26, src, dst); + + // Eliminate pattern: pop(), push(r) + // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al + // -> str r, [sp, 0], al + if (can_peephole_optimize(2) && + // Pattern. 
+ instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) && + instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) { + pc_ -= 2 * kInstrSize; + emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12); + if (FLAG_print_peephole_optimization) { + PrintF("%x pop()/push(reg) eliminated\n", pc_offset()); + } + } } @@ -1493,17 +1646,15 @@ void Assembler::stm(BlockAddrMode am, void Assembler::stop(const char* msg, Condition cond, int32_t code) { #ifndef __arm__ ASSERT(code >= kDefaultStopCode); - { - // The Simulator will handle the stop instruction and get the message - // address. It expects to find the address just after the svc instruction. - BlockConstPoolScope block_const_pool(this); - if (code >= 0) { - svc(kStopCode + code, cond); - } else { - svc(kStopCode + kMaxStopCode, cond); - } - emit(reinterpret_cast<Instr>(msg)); + // The Simulator will handle the stop instruction and get the message address. + // It expects to find the address just after the svc instruction. + BlockConstPoolFor(2); + if (code >= 0) { + svc(kStopCode + code, cond); + } else { + svc(kStopCode + kMaxStopCode, cond); } + emit(reinterpret_cast<Instr>(msg)); #else // def __arm__ #ifdef CAN_USE_ARMV5_INSTRUCTIONS if (cond != al) { @@ -1642,6 +1793,45 @@ void Assembler::ldc2(Coprocessor coproc, } +void Assembler::stc(Coprocessor coproc, + CRegister crd, + const MemOperand& dst, + LFlag l, + Condition cond) { + addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst); +} + + +void Assembler::stc(Coprocessor coproc, + CRegister crd, + Register rn, + int option, + LFlag l, + Condition cond) { + // Unindexed addressing. + ASSERT(is_uint8(option)); + emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 | + coproc*B8 | (option & 255)); +} + + +void Assembler::stc2(Coprocessor + coproc, CRegister crd, + const MemOperand& dst, + LFlag l) { // v5 and above + stc(coproc, crd, dst, l, kSpecialCondition); +} + + +void Assembler::stc2(Coprocessor coproc, + CRegister crd, + Register rn, + int option, + LFlag l) { // v5 and above + stc(coproc, crd, rn, option, l, kSpecialCondition); +} + + // Support for VFP. void Assembler::vldr(const DwVfpRegister dst, @@ -1814,88 +2004,6 @@ void Assembler::vstr(const SwVfpRegister src, } -void Assembler::vldm(BlockAddrMode am, - Register base, - DwVfpRegister first, - DwVfpRegister last, - Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-626. - // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | - // first(15-12) | 1010(11-8) | (count * 2) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); - - int sd, d; - first.split_code(&sd, &d); - int count = last.code() - first.code() + 1; - emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 | - 0xB*B8 | count*2); -} - - -void Assembler::vstm(BlockAddrMode am, - Register base, - DwVfpRegister first, - DwVfpRegister last, - Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-784. 
- // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | - // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); - - int sd, d; - first.split_code(&sd, &d); - int count = last.code() - first.code() + 1; - emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 | - 0xB*B8 | count*2); -} - -void Assembler::vldm(BlockAddrMode am, - Register base, - SwVfpRegister first, - SwVfpRegister last, - Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-626. - // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | - // first(15-12) | 1010(11-8) | (count/2) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); - - int sd, d; - first.split_code(&sd, &d); - int count = last.code() - first.code() + 1; - emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 | - 0xA*B8 | count); -} - - -void Assembler::vstm(BlockAddrMode am, - Register base, - SwVfpRegister first, - SwVfpRegister last, - Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-784. - // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | - // first(15-12) | 1011(11-8) | (count/2) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT_LE(first.code(), last.code()); - ASSERT(am == ia || am == ia_w || am == db_w); - ASSERT(!base.is(pc)); - - int sd, d; - first.split_code(&sd, &d); - int count = last.code() - first.code() + 1; - emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 | - 0xA*B8 | count); -} - static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { uint64_t i; memcpy(&i, &d, 8); @@ -2252,14 +2360,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst, } -void Assembler::vneg(const DwVfpRegister dst, - const DwVfpRegister src, - const Condition cond) { - emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 | - 0x5*B9 | B8 | B6 | src.code()); -} - - void Assembler::vabs(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { @@ -2408,6 +2508,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { } +void Assembler::BlockConstPoolFor(int instructions) { + BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); +} + + // Debugging. void Assembler::RecordJSReturn() { positions_recorder()->WriteRecordedPositions(); @@ -2471,8 +2576,8 @@ void Assembler::GrowBuffer() { // to relocate any emitted relocation entries. // Relocate pending relocation entries. - for (int i = 0; i < num_pending_reloc_info_; i++) { - RelocInfo& rinfo = pending_reloc_info_[i]; + for (int i = 0; i < num_prinfo_; i++) { + RelocInfo& rinfo = prinfo_[i]; ASSERT(rinfo.rmode() != RelocInfo::COMMENT && rinfo.rmode() != RelocInfo::POSITION); if (rinfo.rmode() != RelocInfo::JS_RETURN) { @@ -2486,7 +2591,7 @@ void Assembler::db(uint8_t data) { // No relocation info should be pending while using db. db is used // to write pure data with no pointers and the constant pool should // be emitted before using db. - ASSERT(num_pending_reloc_info_ == 0); + ASSERT(num_prinfo_ == 0); CheckBuffer(); *reinterpret_cast<uint8_t*>(pc_) = data; pc_ += sizeof(uint8_t); @@ -2497,7 +2602,7 @@ void Assembler::dd(uint32_t data) { // No relocation info should be pending while using dd. dd is used // to write pure data with no pointers and the constant pool should // be emitted before using dd. 
- ASSERT(num_pending_reloc_info_ == 0); + ASSERT(num_prinfo_ == 0); CheckBuffer(); *reinterpret_cast<uint32_t*>(pc_) = data; pc_ += sizeof(uint32_t); @@ -2514,14 +2619,11 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { || RelocInfo::IsPosition(rmode)); // These modes do not need an entry in the constant pool. } else { - ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); - if (num_pending_reloc_info_ == 0) { - first_const_pool_use_ = pc_offset(); - } - pending_reloc_info_[num_pending_reloc_info_++] = rinfo; + ASSERT(num_prinfo_ < kMaxNumPRInfo); + prinfo_[num_prinfo_++] = rinfo; // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. - BlockConstPoolFor(1); + BlockConstPoolBefore(pc_offset() + kInstrSize); } if (rinfo.rmode() != RelocInfo::NONE) { // Don't record external references unless the heap will be serialized. @@ -2531,129 +2633,121 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { Serializer::TooLateToEnableNow(); } #endif - if (!Serializer::enabled() && !emit_debug_code()) { + if (!Serializer::enabled() && !FLAG_debug_code) { return; } } ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here - if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - ASSERT(ast_id_for_reloc_info_ != kNoASTId); - RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_); - ast_id_for_reloc_info_ = kNoASTId; - reloc_info_writer.Write(&reloc_info_with_ast_id); - } else { - reloc_info_writer.Write(&rinfo); - } + reloc_info_writer.Write(&rinfo); } } -void Assembler::BlockConstPoolFor(int instructions) { - int pc_limit = pc_offset() + instructions * kInstrSize; - if (no_const_pool_before_ < pc_limit) { - // If there are some pending entries, the constant pool cannot be blocked - // further than first_const_pool_use_ + kMaxDistToPool - ASSERT((num_pending_reloc_info_ == 0) || - (pc_limit < (first_const_pool_use_ + kMaxDistToPool))); - no_const_pool_before_ = pc_limit; +void Assembler::CheckConstPool(bool force_emit, bool require_jump) { + // Calculate the offset of the next check. It will be overwritten + // when a const pool is generated or when const pools are being + // blocked for a specific range. + next_buffer_check_ = pc_offset() + kCheckConstInterval; + + // There is nothing to do if there are no pending relocation info entries. + if (num_prinfo_ == 0) return; + + // We emit a constant pool at regular intervals of about kDistBetweenPools + // or when requested by parameter force_emit (e.g. after each function). + // We prefer not to emit a jump unless the max distance is reached or if we + // are running low on slots, which can happen if a lot of constants are being + // emitted (e.g. --debug-code and many static references). + int dist = pc_offset() - last_const_pool_end_; + if (!force_emit && dist < kMaxDistBetweenPools && + (require_jump || dist < kDistBetweenPools) && + // TODO(1236125): Cleanup the "magic" number below. We know that + // the code generation will test every kCheckConstIntervalInst. + // Thus we are safe as long as we generate less than 7 constant + // entries per instruction. + (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) { + return; } - if (next_buffer_check_ < no_const_pool_before_) { - next_buffer_check_ = no_const_pool_before_; - } -} + // If we did not return by now, we need to emit the constant pool soon. 
+ // However, some small sequences of instructions must not be broken up by the + // insertion of a constant pool; such sequences are protected by setting + // either const_pool_blocked_nesting_ or no_const_pool_before_, which are + // both checked here. Also, recursive calls to CheckConstPool are blocked by + // no_const_pool_before_. + if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) { + // Emission is currently blocked; make sure we try again as soon as + // possible. + if (const_pool_blocked_nesting_ > 0) { + next_buffer_check_ = pc_offset() + kInstrSize; + } else { + next_buffer_check_ = no_const_pool_before_; + } -void Assembler::CheckConstPool(bool force_emit, bool require_jump) { - // Some short sequence of instruction mustn't be broken up by constant pool - // emission, such sequences are protected by calls to BlockConstPoolFor and - // BlockConstPoolScope. - if (is_const_pool_blocked()) { // Something is wrong if emission is forced and blocked at the same time. ASSERT(!force_emit); return; } - // There is nothing to do if there are no pending constant pool entries. - if (num_pending_reloc_info_ == 0) { - // Calculate the offset of the next check. - next_buffer_check_ = pc_offset() + kCheckPoolInterval; - return; - } - - // We emit a constant pool when: - // * requested to do so by parameter force_emit (e.g. after each function). - // * the distance to the first instruction accessing the constant pool is - // kAvgDistToPool or more. - // * no jump is required and the distance to the first instruction accessing - // the constant pool is at least kMaxDistToPool / 2. - ASSERT(first_const_pool_use_ >= 0); - int dist = pc_offset() - first_const_pool_use_; - if (!force_emit && dist < kAvgDistToPool && - (require_jump || (dist < (kMaxDistToPool / 2)))) { - return; - } - - // Check that the code buffer is large enough before emitting the constant - // pool (include the jump over the pool and the constant pool marker and - // the gap to the relocation information). int jump_instr = require_jump ? kInstrSize : 0; - int needed_space = jump_instr + kInstrSize + - num_pending_reloc_info_ * kInstrSize + kGap; - while (buffer_space() <= needed_space) GrowBuffer(); - - { - // Block recursive calls to CheckConstPool. - BlockConstPoolScope block_const_pool(this); - - // Emit jump over constant pool if necessary. - Label after_pool; - if (require_jump) { - b(&after_pool); - } - RecordComment("[ Constant Pool"); - - // Put down constant pool marker "Undefined instruction" as specified by - // A5.6 (ARMv7) Instruction set encoding. - emit(kConstantPoolMarker | num_pending_reloc_info_); - - // Emit constant pool entries. - for (int i = 0; i < num_pending_reloc_info_; i++) { - RelocInfo& rinfo = pending_reloc_info_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && - rinfo.rmode() != RelocInfo::POSITION && - rinfo.rmode() != RelocInfo::STATEMENT_POSITION); - - Instr instr = instr_at(rinfo.pc()); - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. 
- ASSERT(IsLdrPcImmediateOffset(instr) && - GetLdrRegisterImmediateOffset(instr) == 0); - - int delta = pc_ - rinfo.pc() - kPcLoadDelta; - // 0 is the smallest delta: - // ldr rd, [pc, #0] - // constant pool marker - // data - ASSERT(is_uint12(delta)); - - instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); - emit(rinfo.data()); + // Check that the code buffer is large enough before emitting the constant + // pool and relocation information (include the jump over the pool and the + // constant pool marker). + int max_needed_space = + jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize); + while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer(); + + // Block recursive calls to CheckConstPool. + BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize + + num_prinfo_*kInstrSize); + // Don't bother to check for the emit calls below. + next_buffer_check_ = no_const_pool_before_; + + // Emit jump over constant pool if necessary. + Label after_pool; + if (require_jump) b(&after_pool); + + RecordComment("[ Constant Pool"); + + // Put down constant pool marker "Undefined instruction" as specified by + // A3.1 Instruction set encoding. + emit(0x03000000 | num_prinfo_); + + // Emit constant pool entries. + for (int i = 0; i < num_prinfo_; i++) { + RelocInfo& rinfo = prinfo_[i]; + ASSERT(rinfo.rmode() != RelocInfo::COMMENT && + rinfo.rmode() != RelocInfo::POSITION && + rinfo.rmode() != RelocInfo::STATEMENT_POSITION); + Instr instr = instr_at(rinfo.pc()); + + // Instruction to patch must be a ldr/str [pc, #offset]. + // P and U set, B and W clear, Rn == pc, offset12 still 0. + ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) == + (2*B25 | P | U | pc.code()*B16)); + int delta = pc_ - rinfo.pc() - 8; + ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 + if (delta < 0) { + instr &= ~U; + delta = -delta; } + ASSERT(is_uint12(delta)); + instr_at_put(rinfo.pc(), instr + delta); + emit(rinfo.data()); + } + num_prinfo_ = 0; + last_const_pool_end_ = pc_offset(); - num_pending_reloc_info_ = 0; - first_const_pool_use_ = -1; - - RecordComment("]"); + RecordComment("]"); - if (after_pool.is_linked()) { - bind(&after_pool); - } + if (after_pool.is_linked()) { + bind(&after_pool); } // Since a constant pool was just emitted, move the check offset forward by // the standard interval. - next_buffer_check_ = pc_offset() + kCheckPoolInterval; + next_buffer_check_ = pc_offset() + kCheckConstInterval; } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index fbf610a434..f5eb5075f6 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -32,7 +32,7 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // A light-weight ARM Assembler // Generates user mode instructions for the ARM architecture up to version 5 @@ -72,7 +72,6 @@ namespace internal { struct Register { static const int kNumRegisters = 16; static const int kNumAllocatableRegisters = 8; - static const int kSizeInBytes = 4; static int ToAllocationIndex(Register reg) { ASSERT(reg.code() < kNumAllocatableRegisters); @@ -167,14 +166,13 @@ struct SwVfpRegister { // Double word VFP register. struct DwVfpRegister { + // d0 has been excluded from allocation. This is following ia32 + // where xmm0 is excluded. This should be revisited. 
+ // Currently d0 is used as a scratch register. + // d1 has also been excluded from allocation to be used as a scratch + // register as well. static const int kNumRegisters = 16; - // A few double registers are reserved: one as a scratch register and one to - // hold 0.0, that does not fit in the immediate field of vmov instructions. - // d14: 0.0 - // d15: scratch register. - static const int kNumReservedRegisters = 2; - static const int kNumAllocatableRegisters = kNumRegisters - - kNumReservedRegisters; + static const int kNumAllocatableRegisters = 15; static int ToAllocationIndex(DwVfpRegister reg) { ASSERT(reg.code() != 0); @@ -189,7 +187,6 @@ struct DwVfpRegister { static const char* AllocationIndexToString(int index) { ASSERT(index >= 0 && index < kNumAllocatableRegisters); const char* const names[] = { - "d0", "d1", "d2", "d3", @@ -202,7 +199,9 @@ struct DwVfpRegister { "d10", "d11", "d12", - "d13" + "d13", + "d14", + "d15" }; return names[index]; } @@ -303,11 +302,6 @@ const DwVfpRegister d13 = { 13 }; const DwVfpRegister d14 = { 14 }; const DwVfpRegister d15 = { 15 }; -// Aliases for double registers. -const DwVfpRegister kFirstCalleeSavedDoubleReg = d8; -const DwVfpRegister kLastCalleeSavedDoubleReg = d15; -const DwVfpRegister kDoubleRegZero = d14; - // Coprocessor register struct CRegister { @@ -378,6 +372,7 @@ class Operand BASE_EMBEDDED { INLINE(explicit Operand(int32_t immediate, RelocInfo::Mode rmode = RelocInfo::NONE)); INLINE(explicit Operand(const ExternalReference& f)); + INLINE(explicit Operand(const char* s)); explicit Operand(Handle<Object> handle); INLINE(explicit Operand(Smi* value)); @@ -394,11 +389,8 @@ class Operand BASE_EMBEDDED { INLINE(bool is_reg() const); // Return true if this operand fits in one instruction so that no - // 2-instruction solution with a load into the ip register is necessary. If - // the instruction this operand is used for is a MOV or MVN instruction the - // actual instruction to use is required for this calculation. For other - // instructions instr is ignored. - bool is_single_instruction(Instr instr = 0) const; + // 2-instruction solution with a load into the ip register is necessary. + bool is_single_instruction() const; bool must_use_constant_pool() const; inline int32_t immediate() const { @@ -455,7 +447,6 @@ class MemOperand BASE_EMBEDDED { Register rn() const { return rn_; } Register rm() const { return rm_; } - AddrMode am() const { return am_; } bool OffsetIsUint12Encodable() const { return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_); @@ -478,98 +469,43 @@ class CpuFeatures : public AllStatic { public: // Detect features of the target CPU. Set safe defaults if the serializer // is enabled (snapshots must be portable). - static void Probe(); + static void Probe(bool portable); // Check whether a feature is supported by the target CPU. static bool IsSupported(CpuFeature f) { - ASSERT(initialized_); if (f == VFP3 && !FLAG_enable_vfp3) return false; return (supported_ & (1u << f)) != 0; } -#ifdef DEBUG // Check whether a feature is currently enabled. static bool IsEnabled(CpuFeature f) { - ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); - return (enabled & (1u << f)) != 0; + return (enabled_ & (1u << f)) != 0; } -#endif // Enable a specified feature within a scope. 
class Scope BASE_EMBEDDED { #ifdef DEBUG - public: explicit Scope(CpuFeature f) { - unsigned mask = 1u << f; ASSERT(CpuFeatures::IsSupported(f)); ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } + (found_by_runtime_probing_ & (1u << f)) == 0); + old_enabled_ = CpuFeatures::enabled_; + CpuFeatures::enabled_ |= 1u << f; } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - + ~Scope() { CpuFeatures::enabled_ = old_enabled_; } private: - Isolate* isolate_; unsigned old_enabled_; #else - public: explicit Scope(CpuFeature f) {} #endif }; - class TryForceFeatureScope BASE_EMBEDDED { - public: - explicit TryForceFeatureScope(CpuFeature f) - : old_supported_(CpuFeatures::supported_) { - if (CanForce()) { - CpuFeatures::supported_ |= (1u << f); - } - } - - ~TryForceFeatureScope() { - if (CanForce()) { - CpuFeatures::supported_ = old_supported_; - } - } - - private: - static bool CanForce() { - // It's only safe to temporarily force support of CPU features - // when there's only a single isolate, which is guaranteed when - // the serializer is enabled. - return Serializer::enabled(); - } - - const unsigned old_supported_; - }; - private: -#ifdef DEBUG - static bool initialized_; -#endif static unsigned supported_; + static unsigned enabled_; static unsigned found_by_runtime_probing_; - - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -597,7 +533,7 @@ extern const Instr kAndBicFlip; -class Assembler : public AssemblerBase { +class Assembler : public Malloced { public: // Create an assembler. Instructions and relocation information are emitted // into a buffer, with the instructions starting from the beginning and the @@ -612,12 +548,9 @@ class Assembler : public AssemblerBase { // for code generation and assumes its size to be buffer_size. If the buffer // is too small, a fatal error occurs. No deallocation of the buffer is done // upon destruction of the assembler. - Assembler(Isolate* isolate, void* buffer, int buffer_size); + Assembler(void* buffer, int buffer_size); ~Assembler(); - // Overrides the default provided by FLAG_debug_code. - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other // Assembler functions are invoked in between GetCode() calls. @@ -956,6 +889,16 @@ class Assembler : public AssemblerBase { void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, LFlag l = Short); // v5 and above + void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst, + LFlag l = Short, Condition cond = al); + void stc(Coprocessor coproc, CRegister crd, Register base, int option, + LFlag l = Short, Condition cond = al); + + void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst, + LFlag l = Short); // v5 and above + void stc2(Coprocessor coproc, CRegister crd, Register base, int option, + LFlag l = Short); // v5 and above + // Support for VFP. // All these APIs support S0 to S31 and D0 to D15. // Currently these APIs do not support extended D registers, i.e, D16 to D31. 
@@ -994,30 +937,6 @@ class Assembler : public AssemblerBase { const MemOperand& dst, const Condition cond = al); - void vldm(BlockAddrMode am, - Register base, - DwVfpRegister first, - DwVfpRegister last, - Condition cond = al); - - void vstm(BlockAddrMode am, - Register base, - DwVfpRegister first, - DwVfpRegister last, - Condition cond = al); - - void vldm(BlockAddrMode am, - Register base, - SwVfpRegister first, - SwVfpRegister last, - Condition cond = al); - - void vstm(BlockAddrMode am, - Register base, - SwVfpRegister first, - SwVfpRegister last, - Condition cond = al); - void vmov(const DwVfpRegister dst, double imm, const Condition cond = al); @@ -1070,9 +989,6 @@ class Assembler : public AssemblerBase { VFPConversionMode mode = kDefaultRoundToZero, const Condition cond = al); - void vneg(const DwVfpRegister dst, - const DwVfpRegister src, - const Condition cond = al); void vabs(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond = al); @@ -1140,13 +1056,8 @@ class Assembler : public AssemblerBase { void jmp(Label* L) { b(L, al); } // Check the code size generated from label to here. - int SizeOfCodeGeneratedSince(Label* label) { - return pc_offset() - label->pos(); - } - - // Check the number of instructions generated from label to here. - int InstructionsGeneratedSince(Label* label) { - return SizeOfCodeGeneratedSince(label) / kInstrSize; + int InstructionsGeneratedSince(Label* l) { + return (pc_offset() - l->pos()) / kInstrSize; } // Check whether an immediate fits an addressing mode 1 instruction. @@ -1168,6 +1079,10 @@ class Assembler : public AssemblerBase { DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope); }; + // Postpone the generation of the constant pool for the specified number of + // instructions. + void BlockConstPoolFor(int instructions); + // Debugging // Mark address of the ExitJSFrame code. @@ -1176,10 +1091,6 @@ class Assembler : public AssemblerBase { // Mark address of a debug break slot. void RecordDebugBreakSlot(); - // Record the AST id of the CallIC being compiled, so that it can be placed - // in the relocation information. - void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; } - // Record a comment relocation entry that can be used by a disassembler. // Use --code-comments to enable. void RecordComment(const char* msg); @@ -1195,6 +1106,12 @@ class Assembler : public AssemblerBase { PositionsRecorder* positions_recorder() { return &positions_recorder_; } + bool can_peephole_optimize(int instructions) { + if (!allow_peephole_optimization_) return false; + if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false; + return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize; + } + // Read/patch instructions static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } static void instr_at_put(byte* pc, Instr instr) { @@ -1227,27 +1144,10 @@ class Assembler : public AssemblerBase { static int GetCmpImmediateRawImmediate(Instr instr); static bool IsNop(Instr instr, int type = NON_MARKING_NOP); - // Constants in pools are accessed via pc relative addressing, which can - // reach +/-4KB thereby defining a maximum distance between the instruction - // and the accessed constant. - static const int kMaxDistToPool = 4*KB; - static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize; - - // Postpone the generation of the constant pool for the specified number of - // instructions. - void BlockConstPoolFor(int instructions); - - // Check if is time to emit a constant pool. 
+ // Check if is time to emit a constant pool for pending reloc info entries void CheckConstPool(bool force_emit, bool require_jump); protected: - // Relocation for a type-recording IC has the AST id added to it. This - // member variable is a way to pass the information from the call site to - // the relocation info. - unsigned ast_id_for_reloc_info_; - - bool emit_debug_code() const { return emit_debug_code_; } - int buffer_space() const { return reloc_info_writer.pos() - pc_; } // Read/patch instructions @@ -1262,37 +1162,18 @@ class Assembler : public AssemblerBase { // Patch branch instruction at pos to branch to given branch target pos void target_at_put(int pos, int target_pos); - // Prevent contant pool emission until EndBlockConstPool is called. - // Call to this function can be nested but must be followed by an equal - // number of call to EndBlockConstpool. - void StartBlockConstPool() { - if (const_pool_blocked_nesting_++ == 0) { - // Prevent constant pool checks happening by setting the next check to - // the biggest possible offset. - next_buffer_check_ = kMaxInt; - } + // Block the emission of the constant pool before pc_offset + void BlockConstPoolBefore(int pc_offset) { + if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset; } - // Resume constant pool emission. Need to be called as many time as - // StartBlockConstPool to have an effect. - void EndBlockConstPool() { - if (--const_pool_blocked_nesting_ == 0) { - // Check the constant pool hasn't been blocked for too long. - ASSERT((num_pending_reloc_info_ == 0) || - (pc_offset() < (first_const_pool_use_ + kMaxDistToPool))); - // Two cases: - // * no_const_pool_before_ >= next_buffer_check_ and the emission is - // still blocked - // * no_const_pool_before_ < next_buffer_check_ and the next emit will - // trigger a check. - next_buffer_check_ = no_const_pool_before_; - } + void StartBlockConstPool() { + const_pool_blocked_nesting_++; } - - bool is_const_pool_blocked() const { - return (const_pool_blocked_nesting_ > 0) || - (pc_offset() < no_const_pool_before_); + void EndBlockConstPool() { + const_pool_blocked_nesting_--; } + bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; } private: // Code buffer: @@ -1302,6 +1183,9 @@ class Assembler : public AssemblerBase { // True if the assembler owns the buffer, false if buffer is external. bool own_buffer_; + // Buffer size and constant pool distance are checked together at regular + // intervals of kBufferCheckInterval emitted bytes + static const int kBufferCheckInterval = 1*KB/2; int next_buffer_check_; // pc offset of next buffer check // Code generation @@ -1326,41 +1210,40 @@ class Assembler : public AssemblerBase { // expensive. By default we only check again once a number of instructions // has been generated. That also means that the sizing of the buffers is not // an exact science, and that we rely on some slop to not overrun buffers. - static const int kCheckPoolIntervalInst = 32; - static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize; + static const int kCheckConstIntervalInst = 32; + static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize; + + // Pools are emitted after function return and in dead code at (more or less) + // regular intervals of kDistBetweenPools bytes + static const int kDistBetweenPools = 1*KB; - // Average distance beetween a constant pool and the first instruction - // accessing the constant pool. Longer distance should result in less I-cache - // pollution. 
- // In practice the distance will be smaller since constant pool emission is - // forced after function return and sometimes after unconditional branches. - static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval; + // Constants in pools are accessed via pc relative addressing, which can + // reach +/-4KB thereby defining a maximum distance between the instruction + // and the accessed constant. We satisfy this constraint by limiting the + // distance between pools. + static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval; // Emission of the constant pool may be blocked in some code sequences. int const_pool_blocked_nesting_; // Block emission if this is not zero. int no_const_pool_before_; // Block emission before this pc offset. - // Keep track of the first instruction requiring a constant pool entry - // since the previous constant pool was emitted. - int first_const_pool_use_; + // Keep track of the last emitted pool to guarantee a maximal distance + int last_const_pool_end_; // pc offset following the last constant pool // Relocation info generation // Each relocation is encoded as a variable size value static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; RelocInfoWriter reloc_info_writer; - // Relocation info records are also used during code generation as temporary // containers for constants and code target addresses until they are emitted // to the constant pool. These pending relocation info records are temporarily // stored in a separate buffer until a constant pool is emitted. // If every instruction in a long sequence is accessing the pool, we need one // pending relocation entry per instruction. - - // the buffer of pending relocation info - RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo]; - // number of pending reloc info entries in the buffer - int num_pending_reloc_info_; + static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize; + RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info + int num_prinfo_; // number of pending reloc info entries in the buffer // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; @@ -1392,7 +1275,7 @@ class Assembler : public AssemblerBase { friend class BlockConstPoolScope; PositionsRecorder positions_recorder_; - bool emit_debug_code_; + bool allow_peephole_optimization_; friend class PositionsRecorder; friend class EnsureSpace; }; diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 328102bb40..6e8fe28a2b 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -29,7 +29,7 @@ #if defined(V8_TARGET_ARCH_ARM) -#include "codegen.h" +#include "codegen-inl.h" #include "debug.h" #include "deoptimizer.h" #include "full-codegen.h" @@ -68,7 +68,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, // JumpToExternalReference expects r0 to contain the number of arguments // including the receiver and the extra arguments. 
__ add(r0, r0, Operand(num_extra_args + 1)); - __ JumpToExternalReference(ExternalReference(id, masm->isolate())); + __ JumpToExternalReference(ExternalReference(id)); } @@ -310,7 +310,6 @@ static void AllocateJSArray(MacroAssembler* masm, // construct call and normal call. static void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { - Counters* counters = masm->isolate()->counters(); Label argc_one_or_more, argc_two_or_more; // Check for array construction with zero arguments or one. @@ -326,7 +325,7 @@ static void ArrayNativeCode(MacroAssembler* masm, r5, JSArray::kPreallocatedArrayElements, call_generic_code); - __ IncrementCounter(counters->array_function_native(), 1, r3, r4); + __ IncrementCounter(&Counters::array_function_native, 1, r3, r4); // Setup return value, remove receiver from stack and return. __ mov(r0, r2); __ add(sp, sp, Operand(kPointerSize)); @@ -362,7 +361,7 @@ static void ArrayNativeCode(MacroAssembler* masm, r7, true, call_generic_code); - __ IncrementCounter(counters->array_function_native(), 1, r2, r4); + __ IncrementCounter(&Counters::array_function_native, 1, r2, r4); // Setup return value, remove receiver and argument from stack and return. __ mov(r0, r3); __ add(sp, sp, Operand(2 * kPointerSize)); @@ -386,7 +385,7 @@ static void ArrayNativeCode(MacroAssembler* masm, r7, false, call_generic_code); - __ IncrementCounter(counters->array_function_native(), 1, r2, r6); + __ IncrementCounter(&Counters::array_function_native, 1, r2, r6); // Fill arguments as array elements. Copy from the top of the stack (last // element) to the array backing store filling it backwards. Note: @@ -429,7 +428,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { GenerateLoadArrayFunction(masm, r1); if (FLAG_debug_code) { - // Initial map for the builtin Array functions should be maps. + // Initial map for the builtin Array function shoud be a map. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ tst(r2, Operand(kSmiTagMask)); __ Assert(ne, "Unexpected initial map for Array function"); @@ -443,9 +442,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { // Jump to the generic array code if the specialized code cannot handle // the construction. __ bind(&generic_array_code); - - Handle<Code> array_code = - masm->isolate()->builtins()->ArrayCodeGeneric(); + Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric); + Handle<Code> array_code(code); __ Jump(array_code, RelocInfo::CODE_TARGET); } @@ -460,8 +458,11 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { Label generic_constructor; if (FLAG_debug_code) { - // The array construct code is only set for the builtin and internal - // Array functions which always have a map. + // The array construct code is only set for the builtin Array function which + // always have a map. + GenerateLoadArrayFunction(masm, r2); + __ cmp(r1, r2); + __ Assert(eq, "Unexpected Array function"); // Initial map for the builtin Array function should be a map. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ tst(r2, Operand(kSmiTagMask)); @@ -476,8 +477,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { // Jump to the generic construct code in case the specialized code cannot // handle the construction. 
__ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); + Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); + Handle<Code> generic_construct_stub(code); __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); } @@ -490,8 +491,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // -- sp[(argc - n - 1) * 4] : arg[n] (zero based) // -- sp[argc * 4] : receiver // ----------------------------------- - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3); + __ IncrementCounter(&Counters::string_ctor_calls, 1, r2, r3); Register function = r1; if (FLAG_debug_code) { @@ -521,7 +521,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { r5, // Scratch. false, // Is it a Smi? ¬_cached); - __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4); + __ IncrementCounter(&Counters::string_ctor_cached_number, 1, r3, r4); __ bind(&argument_is_string); // ----------- S t a t e ------------- @@ -575,16 +575,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ tst(r3, Operand(kIsNotStringMask)); __ b(ne, &convert_argument); __ mov(argument, r0); - __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4); + __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4); __ b(&argument_is_string); // Invoke the conversion builtin and put the result into r2. __ bind(&convert_argument); __ push(function); // Preserve the function. - __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4); + __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4); __ EnterInternalFrame(); __ push(r0); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS); __ LeaveInternalFrame(); __ pop(function); __ mov(argument, r0); @@ -600,7 +600,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // At this point the argument is already a string. Call runtime to // create a string wrapper. __ bind(&gc_required); - __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4); + __ IncrementCounter(&Counters::string_ctor_gc_required, 1, r3, r4); __ EnterInternalFrame(); __ push(argument); __ CallRuntime(Runtime::kNewStringWrapper, 1); @@ -619,7 +619,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { Label non_function_call; // Check that the function is not a smi. - __ JumpIfSmi(r1, &non_function_call); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &non_function_call); // Check that the function is a JSFunction. __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); __ b(ne, &non_function_call); @@ -635,8 +636,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // Set expected number of arguments to zero (not changing r0). __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); - __ SetCallKind(r5, CALL_AS_METHOD); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET); } @@ -647,8 +647,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Should never count constructions for api objects. ASSERT(!is_api_function || !count_constructions); - Isolate* isolate = masm->isolate(); - // Enter a construct frame. 
__ EnterConstructFrame(); @@ -664,7 +662,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(isolate); + ExternalReference::debug_step_in_fp_address(); __ mov(r2, Operand(debug_step_in_fp)); __ ldr(r2, MemOperand(r2)); __ tst(r2, r2); @@ -674,7 +672,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Load the initial map and verify that it is in fact a map. // r1: constructor function __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &rt_call); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &rt_call); __ CompareObjectType(r2, r3, r4, MAP_TYPE); __ b(ne, &rt_call); @@ -909,15 +908,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // r1: constructor function if (is_api_function) { __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); + Handle<Code> code = Handle<Code>( + Builtins::builtin(Builtins::HandleApiCallConstruct)); ParameterCount expected(0); __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + RelocInfo::CODE_TARGET, CALL_FUNCTION); } else { ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeFunction(r1, actual, CALL_FUNCTION); } // Pop the function from the stack. @@ -944,11 +942,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // sp[0]: receiver (newly allocated object) // sp[1]: constructor function // sp[2]: number of arguments (smi-tagged) - __ JumpIfSmi(r0, &use_receiver); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &use_receiver); // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); + // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE); __ b(ge, &exit); // Throw away the result of the constructor invocation and use the @@ -967,7 +966,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); - __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2); + __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2); __ Jump(lr); } @@ -1007,8 +1006,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Set up the roots register. - ExternalReference roots_address = - ExternalReference::roots_address(masm->isolate()); + ExternalReference roots_address = ExternalReference::roots_address(); __ mov(r10, Operand(roots_address)); // Push the function and the receiver onto the stack. @@ -1044,11 +1042,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Invoke the code and pass argc as r0. 
__ mov(r0, Operand(r3)); if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall()); + __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)), + RelocInfo::CODE_TARGET); } else { ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeFunction(r1, actual, CALL_FUNCTION); } // Exit the JS frame and remove the parameters (except function), and return. @@ -1076,17 +1074,12 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Preserve the function. __ push(r1); - // Push call kind information. - __ push(r5); // Push the function on the stack as the argument to the runtime function. __ push(r1); __ CallRuntime(Runtime::kLazyCompile, 1); // Calculate the entry point. __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - - // Restore call kind information. - __ pop(r5); // Restore saved function. __ pop(r1); @@ -1104,17 +1097,12 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Preserve the function. __ push(r1); - // Push call kind information. - __ push(r5); // Push the function on the stack as the argument to the runtime function. __ push(r1); __ CallRuntime(Runtime::kLazyRecompile, 1); // Calculate the entry point. __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - - // Restore call kind information. - __ pop(r5); // Restore saved function. __ pop(r1); @@ -1182,11 +1170,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - CpuFeatures::TryForceFeatureScope scope(VFP3); - if (!CpuFeatures::IsSupported(VFP3)) { - __ Abort("Unreachable code: Cannot optimize without VFP3 support."); - return; - } + // Probe the CPU to set the supported features, because this builtin + // may be called before the initialization performs CPU setup. + CpuFeatures::Probe(false); // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. @@ -1232,7 +1218,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // r0: actual number of arguments Label non_function; __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); - __ JumpIfSmi(r1, &non_function); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &non_function); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); __ b(ne, &non_function); @@ -1246,33 +1233,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Do not transform the receiver for strict mode functions. __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); __ b(ne, &shift_arguments); - // Do not transform the receiver for native (Compilerhints already in r3). - __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &shift_arguments); - // Compute the receiver in non-strict mode. 
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2)); __ ldr(r2, MemOperand(r2, -kPointerSize)); // r0: actual number of arguments // r1: function // r2: first argument - __ JumpIfSmi(r2, &convert_to_object); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &convert_to_object); - __ LoadRoot(r3, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r3, Heap::kNullValueRootIndex); __ cmp(r2, r3); __ b(eq, &use_global_receiver); - __ LoadRoot(r3, Heap::kNullValueRootIndex); + __ LoadRoot(r3, Heap::kUndefinedValueRootIndex); __ cmp(r2, r3); __ b(eq, &use_global_receiver); - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &shift_arguments); + __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, &convert_to_object); + __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE)); + __ b(le, &shift_arguments); __ bind(&convert_to_object); __ EnterInternalFrame(); // In order to preserve argument count. @@ -1280,7 +1265,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ push(r0); __ push(r2); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); __ mov(r2, r0); __ pop(r0); @@ -1350,9 +1335,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Expected number of arguments is 0 for CALL_NON_FUNCTION. __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); - __ SetCallKind(r5, CALL_AS_METHOD); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)), + RelocInfo::CODE_TARGET); __ bind(&function); } @@ -1366,15 +1350,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); __ mov(r2, Operand(r2, ASR, kSmiTagSize)); __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); - __ SetCallKind(r5, CALL_AS_METHOD); __ cmp(r2, r0); // Check formal and actual parameter counts. - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET, - ne); + __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)), + RelocInfo::CODE_TARGET, ne); ParameterCount expected(0); - __ InvokeCode(r3, expected, expected, JUMP_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeCode(r3, expected, expected, JUMP_FUNCTION); } @@ -1391,7 +1372,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ push(r0); __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS); // Check the stack for overflow. We are not trying need to catch // interruptions (e.g. debug break and preemption) here, so the "real stack @@ -1409,7 +1390,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ ldr(r1, MemOperand(fp, kFunctionOffset)); __ push(r1); __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS); // End of stack check. // Push current limit and index. @@ -1429,17 +1410,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ ldr(r0, MemOperand(fp, kRecvOffset)); // Do not transform the receiver for strict mode functions. 
- __ ldr(r2, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); __ b(ne, &push_receiver); - // Do not transform the receiver for strict mode functions. - __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &push_receiver); - // Compute the receiver in non-strict mode. - __ JumpIfSmi(r0, &call_to_object); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &call_to_object); __ LoadRoot(r1, Heap::kNullValueRootIndex); __ cmp(r0, r1); __ b(eq, &use_global_receiver); @@ -1449,15 +1427,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Check if the receiver is already a JavaScript object. // r0: receiver - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &push_receiver); + __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); + __ b(lt, &call_to_object); + __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); + __ b(le, &push_receiver); // Convert the receiver to a regular object. // r0: receiver __ bind(&call_to_object); __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); __ b(&push_receiver); // Use the current global receiver object as the receiver. @@ -1507,8 +1486,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { ParameterCount actual(r0); __ mov(r0, Operand(r0, ASR, kSmiTagSize)); __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeFunction(r1, actual, CALL_FUNCTION); // Tear down the internal frame and remove function, receiver and args. __ LeaveInternalFrame(); @@ -1545,7 +1523,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // -- r1 : function (passed through to callee) // -- r2 : expected number of arguments // -- r3 : code entry to call - // -- r5 : call kind information // ----------------------------------- Label invoke, dont_adapt_arguments; diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 3c97332346..e8f217d276 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -55,30 +55,23 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register rhs); -// Check if the operand is a heap number. -static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, - Register scratch1, Register scratch2, - Label* not_a_heap_number) { - __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); - __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch1, scratch2); - __ b(ne, not_a_heap_number); -} - - void ToNumberStub::Generate(MacroAssembler* masm) { // The ToNumber stub takes one argument in eax. 
Label check_heap_number, call_builtin; - __ JumpIfNotSmi(r0, &check_heap_number); + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &check_heap_number); __ Ret(); __ bind(&check_heap_number); - EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin); + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, ip); + __ b(ne, &call_builtin); __ Ret(); __ bind(&call_builtin); __ push(r0); - __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS); } @@ -98,15 +91,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { &gc, TAG_OBJECT); - int map_index = strict_mode_ == kStrictMode - ? Context::STRICT_MODE_FUNCTION_MAP_INDEX - : Context::FUNCTION_MAP_INDEX; - // Compute the function map in the current global context and set that // as the map of the allocated object. __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); - __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index))); + __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); // Initialize the rest of the function. We don't have to update the @@ -157,7 +146,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { __ ldr(r3, MemOperand(sp, 0)); // Setup the object header. - __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex); + __ LoadRoot(r2, Heap::kContextMapRootIndex); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); __ mov(r2, Operand(Smi::FromInt(length))); __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); @@ -165,10 +154,11 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Setup the fixed slots. __ mov(r1, Operand(Smi::FromInt(0))); __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); - __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); + __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); - // Copy the global object from the previous context. + // Copy the global object from the surrounding context. __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); @@ -185,7 +175,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Need to collect. Call into runtime system. __ bind(&gc); - __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); } @@ -314,9 +304,13 @@ class ConvertToDoubleStub : public CodeStub { void ConvertToDoubleStub::Generate(MacroAssembler* masm) { +#ifndef BIG_ENDIAN_FLOATING_POINT Register exponent = result1_; Register mantissa = result2_; - +#else + Register exponent = result2_; + Register mantissa = result1_; +#endif Label not_special; // Convert from Smi to integer. __ mov(source_, Operand(source_, ASR, kSmiTagSize)); @@ -370,6 +364,138 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { } +class FloatingPointHelper : public AllStatic { + public: + + enum Destination { + kVFPRegisters, + kCoreRegisters + }; + + + // Loads smis from r0 and r1 (right and left in binary operations) into + // floating point registers. Depending on the destination the values ends up + // either d7 and d6 or in r2/r3 and r0/r1 respectively. 
If the destination is + // floating point registers VFP3 must be supported. If core registers are + // requested when VFP3 is supported d6 and d7 will be scratched. + static void LoadSmis(MacroAssembler* masm, + Destination destination, + Register scratch1, + Register scratch2); + + // Loads objects from r0 and r1 (right and left in binary operations) into + // floating point registers. Depending on the destination the values ends up + // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is + // floating point registers VFP3 must be supported. If core registers are + // requested when VFP3 is supported d6 and d7 will still be scratched. If + // either r0 or r1 is not a number (not smi and not heap number object) the + // not_number label is jumped to with r0 and r1 intact. + static void LoadOperands(MacroAssembler* masm, + FloatingPointHelper::Destination destination, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number); + + // Loads the number from object into dst as a 32-bit integer if possible. If + // the object cannot be converted to a 32-bit integer control continues at + // the label not_int32. If VFP is supported double_scratch is used + // but not scratch2. + // Floating point value in the 32-bit integer range will be rounded + // to an integer. + static void LoadNumberAsInteger(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + DwVfpRegister double_scratch, + Label* not_int32); + + // Load the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be loaded. + static void LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + Destination destination, + DwVfpRegister double_dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + SwVfpRegister single_scratch, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point value in the 32-bit integer range that are not exact integer + // won't be converted. + // scratch3 is not used when VFP3 is supported. + static void LoadNumberAsInt32(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch, + Label* not_int32); + + // Generate non VFP3 code to check if a double can be exactly represented by a + // 32-bit integer. This does not check for 0 or -0, which need + // to be checked for separately. + // Control jumps to not_int32 if the value is not a 32-bit integer, and falls + // through otherwise. + // src1 and src2 will be cloberred. + // + // Expected input: + // - src1: higher (exponent) part of the double value. + // - src2: lower (mantissa) part of the double value. + // Output status: + // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) + // - src2: contains 1. + // - other registers are clobbered. + static void DoubleIs32BitInteger(MacroAssembler* masm, + Register src1, + Register src2, + Register dst, + Register scratch, + Label* not_int32); + + // Generates code to call a C function to do a double operation using core + // registers. 
(Used when VFP3 is not supported.) + // This code never falls through, but returns with a heap number containing + // the result in r0. + // Register heapnumber_result must be a heap number in which the + // result of the operation will be stored. + // Requires the following layout on entry: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + static void CallCCodeForDoubleOperation(MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch); + + private: + static void LoadNumber(MacroAssembler* masm, + FloatingPointHelper::Destination destination, + Register object, + DwVfpRegister dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number); +}; + + void FloatingPointHelper::LoadSmis(MacroAssembler* masm, FloatingPointHelper::Destination destination, Register scratch1, @@ -392,11 +518,11 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, __ mov(scratch1, Operand(r0)); ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); __ push(lr); - __ Call(stub1.GetCode()); - // Write Smi from r1 to r1 and r0 in double format. + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Write Smi from r1 to r1 and r0 in double format. r9 is scratch. __ mov(scratch1, Operand(r1)); ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); - __ Call(stub2.GetCode()); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); __ pop(lr); } } @@ -442,8 +568,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); // Handle loading a double from a heap number. - if (CpuFeatures::IsSupported(VFP3) && - destination == kVFPRegisters) { + if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) { CpuFeatures::Scope scope(VFP3); // Load the double from tagged HeapNumber to double register. 
__ sub(scratch1, object, Operand(kHeapObjectTag)); @@ -473,7 +598,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, __ mov(scratch1, Operand(object)); ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); __ push(lr); - __ Call(stub.GetCode()); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); __ pop(lr); } @@ -481,69 +606,57 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, } -void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch, - Label* not_number) { +void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm, + Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + DwVfpRegister double_scratch, + Label* not_int32) { if (FLAG_debug_code) { __ AbortIfNotRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); } - Label is_smi; - Label done; - Label not_in_int32_range; - + Label is_smi, done; __ JumpIfSmi(object, &is_smi); __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); __ cmp(scratch1, heap_number_map); - __ b(ne, not_number); - __ ConvertToInt32(object, - dst, - scratch1, - scratch2, - double_scratch, - &not_in_int32_range); - __ jmp(&done); - - __ bind(&not_in_int32_range); - __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); - __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); - - __ EmitOutOfInt32RangeTruncate(dst, - scratch1, - scratch2, - scratch3); + __ b(ne, not_int32); + __ ConvertToInt32( + object, dst, scratch1, scratch2, double_scratch, not_int32); __ jmp(&done); - __ bind(&is_smi); __ SmiUntag(dst, object); __ bind(&done); } -void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, - Register int_scratch, - Destination destination, - DwVfpRegister double_dst, - Register dst1, - Register dst2, - Register scratch2, - SwVfpRegister single_scratch) { - ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst1)); - ASSERT(!int_scratch.is(dst2)); +void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, + Register object, + Destination destination, + DwVfpRegister double_dst, + Register dst1, + Register dst2, + Register heap_number_map, + Register scratch1, + Register scratch2, + SwVfpRegister single_scratch, + Label* not_int32) { + ASSERT(!scratch1.is(object) && !scratch2.is(object)); + ASSERT(!scratch1.is(scratch2)); + ASSERT(!heap_number_map.is(object) && + !heap_number_map.is(scratch1) && + !heap_number_map.is(scratch2)); - Label done; + Label done, obj_is_not_smi; + __ JumpIfNotSmi(object, &obj_is_not_smi); + __ SmiUntag(scratch1, object); if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); - __ vmov(single_scratch, int_scratch); + __ vmov(single_scratch, scratch1); __ vcvt_f64_s32(double_dst, single_scratch); if (destination == kCoreRegisters) { __ vmov(dst1, dst2, double_dst); @@ -551,79 +664,53 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, } else { Label fewer_than_20_useful_bits; // Expected output: - // | dst2 | dst1 | + // | dst1 | dst2 | // | s | exp | mantissa | // Check for zero. - __ cmp(int_scratch, Operand(0)); - __ mov(dst2, int_scratch); - __ mov(dst1, int_scratch); + __ cmp(scratch1, Operand(0)); + __ mov(dst1, scratch1); + __ mov(dst2, scratch1); __ b(eq, &done); // Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC); + __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC); // Get the absolute value of the object (as an unsigned integer). - __ rsb(int_scratch, int_scratch, Operand(0), SetCC, mi); + __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); // Get mantisssa[51:20]. // Get the position of the first set bit. - __ CountLeadingZeros(dst1, int_scratch, scratch2); - __ rsb(dst1, dst1, Operand(31)); + __ CountLeadingZeros(dst2, scratch1, scratch2); + __ rsb(dst2, dst2, Operand(31)); // Set the exponent. - __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); - __ Bfi(dst2, scratch2, scratch2, + __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst1, scratch2, scratch2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Clear the first non null bit. __ mov(scratch2, Operand(1)); - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1)); + __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2)); - __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); // Get the number of bits to set in the lower part of the mantissa. - __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); __ b(mi, &fewer_than_20_useful_bits); // Set the higher 20 bits of the mantissa. - __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2)); + __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2)); __ rsb(scratch2, scratch2, Operand(32)); - __ mov(dst1, Operand(int_scratch, LSL, scratch2)); + __ mov(dst2, Operand(scratch1, LSL, scratch2)); __ b(&done); __ bind(&fewer_than_20_useful_bits); - __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); - __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); - __ orr(dst2, dst2, scratch2); - // Set dst1 to 0. - __ mov(dst1, Operand(0)); + __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ mov(scratch2, Operand(scratch1, LSL, scratch2)); + __ orr(dst1, dst1, scratch2); + // Set dst2 to 0. + __ mov(dst2, Operand(0)); } - __ bind(&done); -} - - -void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, - Register object, - Destination destination, - DwVfpRegister double_dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - SwVfpRegister single_scratch, - Label* not_int32) { - ASSERT(!scratch1.is(object) && !scratch2.is(object)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!heap_number_map.is(object) && - !heap_number_map.is(scratch1) && - !heap_number_map.is(scratch2)); - Label done, obj_is_not_smi; - - __ JumpIfNotSmi(object, &obj_is_not_smi); - __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, - scratch2, single_scratch); __ b(&done); __ bind(&obj_is_not_smi); @@ -785,11 +872,12 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Exponent greater than 31 cannot yield 32-bit integers. // Also, a positive value with an exponent equal to 31 is outside of the // signed 32-bit integer range. - // Another way to put it is that if (exponent - signbit) > 30 then the - // number cannot be represented as an int32. - Register tmp = dst; - __ sub(tmp, scratch, Operand(src1, LSR, 31)); - __ cmp(tmp, Operand(30)); + __ tst(src1, Operand(HeapNumber::kSignMask)); + __ cmp(scratch, Operand(30), eq); // Executed for positive. 
If exponent is 30 + // the gt condition will be "correct" and + // the next instruction will be skipped. + __ cmp(scratch, Operand(31), ne); // Executed for negative and positive where + // exponent is not 30. __ b(gt, not_int32); // - Bits [21:0] in the mantissa are not null. __ tst(src2, Operand(0x3fffff)); @@ -838,25 +926,21 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( // Push the current return address before the C call. Return will be // through pop(pc) below. __ push(lr); - __ PrepareCallCFunction(0, 2, scratch); - if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP3); - __ vmov(d0, r0, r1); - __ vmov(d1, r2, r3); - } + __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. // Call C routine that may not cause GC or other trouble. - __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), - 0, 2); - // Store answer in the overwritable heap number. Double returned in - // registers r0 and r1 or in d0. - if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP3); - __ vstr(d0, - FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - } else { - __ Strd(r0, r1, FieldMemOperand(heap_number_result, - HeapNumber::kValueOffset)); - } + __ CallCFunction(ExternalReference::double_fp_operation(op), 4); + // Store answer in the overwritable heap number. +#if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to substract the tag from heap_number_result. + __ sub(scratch, heap_number_result, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset)); +#else + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(heap_number_result, + HeapNumber::kValueOffset)); +#endif // Place heap_number_result in r0 and return to the pushed return address. __ mov(r0, Operand(heap_number_result)); __ pop(pc); @@ -924,19 +1008,19 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, // The two objects are identical. If we know that one of them isn't NaN then // we now know they test equal. if (cond != eq || !never_nan_nan) { - // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), // so we do the second best thing - test it ourselves. // They are both equal and they are not both Smis so both of them are not // Smis. If it's not a heap number, then return equal. if (cond == lt || cond == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); __ b(ge, slow); } else { __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); __ b(eq, &heap_number); // Comparing JS objects with <=, >= is complicated. if (cond != eq) { - __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ b(ge, slow); // Normally here we fall through to return_equal, but undefined is // special: (undefined == undefined) == true, but @@ -1027,7 +1111,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, (lhs.is(r1) && rhs.is(r0))); Label rhs_is_smi; - __ JumpIfSmi(rhs, &rhs_is_smi); + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); // Lhs is a Smi. Check whether the rhs is a heap number. __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); @@ -1058,7 +1143,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Convert lhs to a double in r2, r3. 
__ mov(r7, Operand(lhs)); ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ Call(stub1.GetCode()); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); // Load rhs to a double in r0, r1. __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); __ pop(lr); @@ -1100,7 +1185,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Convert rhs to a double in r0, r1. __ mov(r7, Operand(rhs)); ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode()); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); __ pop(lr); } // Fall through to both_loaded_as_doubles. @@ -1197,14 +1282,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, // Call a native function to do a comparison between two non-NaNs. // Call C routine that may not cause GC or other trouble. __ push(lr); - __ PrepareCallCFunction(0, 2, r5); - if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP3); - __ vmov(d0, r0, r1); - __ vmov(d1, r2, r3); - } - __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), - 0, 2); + __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments. + __ CallCFunction(ExternalReference::compare_doubles(), 4); __ pop(pc); // Return. } } @@ -1217,14 +1296,14 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); - // If either operand is a JS object or an oddball value, then they are + // If either operand is a JSObject or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); Label first_non_object; // Get the type of the first operand into r2 and compare it with - // FIRST_SPEC_OBJECT_TYPE. - __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE); + // FIRST_JS_OBJECT_TYPE. + __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE); __ b(lt, &first_non_object); // Return non-zero (r0 is not zero) @@ -1237,7 +1316,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, __ cmp(r2, Operand(ODDBALL_TYPE)); __ b(eq, &return_not_equal); - __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE); __ b(ge, &return_not_equal); // Check for oddballs: true, false, null, undefined. @@ -1314,9 +1393,9 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, __ Ret(); __ bind(&object_test); - __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, not_both_strings); - __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE); __ b(lt, not_both_strings); // If both objects are undetectable, they are equal. Otherwise, they // are not equal, since they are different objects and an object is not @@ -1357,7 +1436,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, // number string cache for smis is just the smi value, and the hash for // doubles is the xor of the upper and lower words. See // Heap::GetNumberStringCache. 
- Isolate* isolate = masm->isolate(); Label is_smi; Label load_result_from_cache; if (!object_is_smi) { @@ -1368,7 +1446,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, scratch1, Heap::kHeapNumberMapRootIndex, not_found, - DONT_DO_SMI_CHECK); + true); STATIC_ASSERT(8 == kDoubleSize); __ add(scratch1, object, Operand(HeapNumber::kValueOffset - kHeapObjectTag)); @@ -1419,7 +1497,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ bind(&load_result_from_cache); __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(isolate->counters()->number_to_string_native(), + __ IncrementCounter(&Counters::number_to_string_native, 1, scratch1, scratch2); @@ -1455,7 +1533,8 @@ void CompareStub::Generate(MacroAssembler* masm) { if (include_smi_compare_) { Label not_two_smis, smi_done; __ orr(r2, r1, r0); - __ JumpIfNotSmi(r2, &not_two_smis); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, &not_two_smis); __ mov(r1, Operand(r1, ASR, 1)); __ sub(r0, r1, Operand(r0, ASR, 1)); __ Ret(); @@ -1478,7 +1557,8 @@ void CompareStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); __ and_(r2, lhs_, Operand(rhs_)); - __ JumpIfNotSmi(r2, &not_smis); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, &not_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: // 1) Return the answer. // 2) Go to slow. @@ -1493,7 +1573,6 @@ void CompareStub::Generate(MacroAssembler* masm) { __ bind(&both_loaded_as_doubles); // The arguments have been converted to doubles and stored in d6 and d7, if // VFP3 is supported, or in r0, r1, r2, and r3. - Isolate* isolate = masm->isolate(); if (CpuFeatures::IsSupported(VFP3)) { __ bind(&lhs_not_nan); CpuFeatures::Scope scope(VFP3); @@ -1564,23 +1643,14 @@ void CompareStub::Generate(MacroAssembler* masm) { __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); - __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); - if (cc_ == eq) { - StringCompareStub::GenerateFlatAsciiStringEquals(masm, + __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); + StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs_, rhs_, r2, r3, - r4); - } else { - StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs_, - rhs_, - r2, - r3, - r4, - r5); - } + r4, + r5); // Never falls through to here. __ bind(&slow); @@ -1605,72 +1675,32 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. - __ InvokeBuiltin(native, JUMP_FUNCTION); + __ InvokeBuiltin(native, JUMP_JS); } +// This stub does not handle the inlined cases (Smis, Booleans, undefined). // The stub returns zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { // This stub uses VFP3 instructions. - CpuFeatures::Scope scope(VFP3); - - Label false_result, true_result, not_string; - const Register map = r9.is(tos_) ? r7 : r9; - - // undefined -> false - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(tos_, ip); - __ b(eq, &false_result); - - // Boolean -> its value - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(tos_, ip); - __ b(eq, &false_result); - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(tos_, ip); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the equal condition is satisfied.
- __ Ret(eq); + ASSERT(CpuFeatures::IsEnabled(VFP3)); - // Smis: 0 -> false, all other -> true - __ tst(tos_, tos_); - __ b(eq, &false_result); - __ tst(tos_, Operand(kSmiTagMask)); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the not equal condition is satisfied. - __ Ret(eq); + Label false_result; + Label not_heap_number; + Register scratch = r9.is(tos_) ? r7 : r9; - // 'null' -> false __ LoadRoot(ip, Heap::kNullValueRootIndex); __ cmp(tos_, ip); __ b(eq, &false_result); - // Get the map of the heap object. - __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); - - // Undetectable -> false. - __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); - __ tst(ip, Operand(1 << Map::kIsUndetectable)); - __ b(&false_result, ne); - - // JavaScript object -> true. - __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the greater than condition is satisfied. - __ Ret(ge); - - // String value -> false iff empty. - __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); - __ b(&not_string, ge); - __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); - // Return string length as boolean value, i.e. return false iff length is 0. - __ Ret(); + // HeapNumber => false iff +0, -0, or NaN. + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, ip); + __ b(&not_heap_number, ne); - __ bind(&not_string); - // HeapNumber -> false iff +0, -0, or NaN. - __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); - __ b(&true_result, ne); - __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); + __ sub(ip, tos_, Operand(kHeapObjectTag)); + __ vldr(d1, ip, HeapNumber::kValueOffset); __ VFPCompareAndSetFlags(d1, 0.0); // "tos_" is a register, and contains a non zero value by default. // Hence we only need to overwrite "tos_" with zero to return false for @@ -1679,135 +1709,542 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN __ Ret(); - // Return 1/0 for true/false in tos_. - __ bind(&true_result); - __ mov(tos_, Operand(1, RelocInfo::NONE)); + __ bind(&not_heap_number); + + // Check if the value is 'null'. + // 'null' => false. + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(tos_, ip); + __ b(&false_result, eq); + + // It can be an undetectable object. + // Undetectable => false. + __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); + __ b(&false_result, eq); + + // JavaScript object => true. + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); + // "tos_" is a register and contains a non-zero value. + // Hence we implicitly return true if the greater than + // condition is satisfied. + __ Ret(gt); + + // Check for string + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); + // "tos_" is a register and contains a non-zero value. + // Hence we implicitly return true if the greater than + // condition is satisfied.
+ __ Ret(gt); + + // String value => false iff empty, i.e., length is zero + __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); + // If length is zero, "tos_" contains zero ==> false. + // If length is not zero, "tos_" contains a non-zero value ==> true. __ Ret(); + + // Return 0 in "tos_" for false . __ bind(&false_result); __ mov(tos_, Operand(0, RelocInfo::NONE)); __ Ret(); } -const char* UnaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name = NULL; // Make g++ happy. - switch (mode_) { - case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; - case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; - } +// We fall into this code if the operands were Smis, but the result was +// not (eg. overflow). We branch into this code (to the not_smi label) if +// the operands were not both Smi. The operands are in r0 and r1. In order +// to call the C-implemented binary fp operation routines we need to end up +// with the double precision floating point operands in r0 and r1 (for the +// value in r1) and r2 and r3 (for the value in r0). +void GenericBinaryOpStub::HandleBinaryOpSlowCases( + MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin) { + Label slow, slow_reverse, do_the_call; + bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; + + ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); + Register heap_number_map = r6; - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "UnaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - UnaryOpIC::GetName(operand_type_)); - return name_; -} + if (ShouldGenerateSmiCode()) { + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + // Smi-smi case (overflow). + // Since both are Smis there is no heap number to overwrite, so allocate. + // The new heap number is in r5. r3 and r7 are scratch. + __ AllocateHeapNumber( + r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::Generate(MacroAssembler* masm) { - switch (operand_type_) { - case UnaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case UnaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case UnaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); - break; - case UnaryOpIC::GENERIC: - GenerateGenericStub(masm); - break; + // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, + // using registers d7 and d6 for the double values. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + if (!use_fp_registers) { + __ vmov(r2, r3, d7); + __ vmov(r0, r1, d6); + } + } else { + // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. + __ mov(r7, Operand(rhs)); + ConvertToDoubleStub stub1(r3, r2, r7, r9); + __ push(lr); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. + __ mov(r7, Operand(lhs)); + ConvertToDoubleStub stub2(r1, r0, r7, r9); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ jmp(&do_the_call); // Tail call. 
No return. } -} + // We branch here if at least one of r0 and r1 is not a Smi. + __ bind(not_smi); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); -void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - __ mov(r3, Operand(r0)); // the operand - __ mov(r2, Operand(Smi::FromInt(op_))); - __ mov(r1, Operand(Smi::FromInt(mode_))); - __ mov(r0, Operand(Smi::FromInt(operand_type_))); - __ Push(r3, r2, r1, r0); + // After this point we have the left hand side in r1 and the right hand side + // in r0. + if (lhs.is(r0)) { + __ Swap(r0, r1, ip); + } - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); -} + // The type transition also calculates the answer. + bool generate_code_to_calculate_answer = true; + if (ShouldGenerateFPCode()) { + // DIV has neither SmiSmi fast code nor specialized slow code. + // So don't try to patch a DIV Stub. + if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + GenerateTypeTransition(masm); // Tail call. + generate_code_to_calculate_answer = false; + break; -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - switch (op_) { - case Token::SUB: - GenerateSmiStubSub(masm); - break; - case Token::BIT_NOT: - GenerateSmiStubBitNot(masm); - break; - default: - UNREACHABLE(); + case Token::DIV: + // DIV has neither SmiSmi fast code nor specialized slow code. + // So don't try to patch a DIV Stub. + break; + + default: + break; + } + } + + if (generate_code_to_calculate_answer) { + Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; + if (mode_ == NO_OVERWRITE) { + // In the case where there is no chance of an overwritable float we may + // as well do the allocation immediately while r0 and r1 are untouched. + __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); + } + + // Move r0 to a double in r2-r3. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + if (mode_ == OVERWRITE_RIGHT) { + __ mov(r5, Operand(r0)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r0 to d7. + __ sub(r7, r0, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that second double is in r2 and r3. + __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); + } + __ jmp(&finished_loading_r0); + __ bind(&r0_is_smi); + if (mode_ == OVERWRITE_RIGHT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r0 to double in d7. + __ mov(r7, Operand(r0, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + if (!use_fp_registers) { + __ vmov(r2, r3, d7); + } + } else { + // Write Smi from r0 to r3 and r2 in double format. + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub3(r3, r2, r7, r4); + __ push(lr); + __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. + // r0 is known to be a smi. 
If r1 is also a smi then switch to GENERIC. + Label r1_is_not_smi; + if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) && + HasSmiSmiFastPath()) { + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &r1_is_not_smi); + GenerateTypeTransition(masm); // Tail call. + } + + __ bind(&finished_loading_r0); + + // Move r1 to a double in r0-r1. + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. + __ bind(&r1_is_not_smi); + __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + if (mode_ == OVERWRITE_LEFT) { + __ mov(r5, Operand(r1)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r1 to d6. + __ sub(r7, r1, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that first double is in r0 and r1. + __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); + } + __ jmp(&finished_loading_r1); + __ bind(&r1_is_smi); + if (mode_ == OVERWRITE_LEFT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r1 to double in d6. + __ mov(r7, Operand(r1, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + if (!use_fp_registers) { + __ vmov(r0, r1, d6); + } + } else { + // Write Smi from r1 to r1 and r0 in double format. + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub4(r1, r0, r7, r9); + __ push(lr); + __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + __ bind(&finished_loading_r1); + } + + if (generate_code_to_calculate_answer || do_the_call.is_linked()) { + __ bind(&do_the_call); + // If we are inlining the operation using VFP3 instructions for + // add, subtract, multiply, or divide, the arguments are in d6 and d7. + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions to implement + // double precision, add, subtract, multiply, divide. + + if (Token::MUL == op_) { + __ vmul(d5, d6, d7); + } else if (Token::DIV == op_) { + __ vdiv(d5, d6, d7); + } else if (Token::ADD == op_) { + __ vadd(d5, d6, d7); + } else if (Token::SUB == op_) { + __ vsub(d5, d6, d7); + } else { + UNREACHABLE(); + } + __ sub(r0, r5, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ add(r0, r0, Operand(kHeapObjectTag)); + __ Ret(); + } else { + // If we did not inline the operation, then the arguments are in: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + // r5: Address of heap number for result. + + __ push(lr); // For later. + __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. + // Call C routine that may not cause GC or other trouble. r5 is callee + // save. + __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); + // Store answer in the overwritable heap number. + #if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to substract the tag from r5. 
+ __ sub(r4, r5, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); + #else + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); + #endif + __ mov(r0, Operand(r5)); + // And we are done. + __ pop(pc); + } + } } -} + if (!generate_code_to_calculate_answer && + !slow_reverse.is_linked() && + !slow.is_linked()) { + return; + } + + if (lhs.is(r0)) { + __ b(&slow); + __ bind(&slow_reverse); + __ Swap(r0, r1, ip); + } + + heap_number_map = no_reg; // Don't use this any more from here on. -void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeSub(masm, &non_smi, &slow); - __ bind(&non_smi); + // We jump to here if something goes wrong (one param is not a number of any + // sort or new-space allocation fails). __ bind(&slow); - GenerateTypeTransition(masm); -} + // Push arguments to the stack + __ Push(r1, r0); -void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { - Label non_smi; - GenerateSmiCodeBitNot(masm, &non_smi); - __ bind(&non_smi); - GenerateTypeTransition(masm); + if (Token::ADD == op_) { + // Test for string arguments before calling runtime. + // r1 : first argument + // r0 : second argument + // sp[0] : second argument + // sp[4] : first argument + + Label not_strings, not_string1, string1, string1_smi2; + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, ¬_string1); + __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, ¬_string1); + + // First argument is a a string, test second. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &string1_smi2); + __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, &string1); + + // First and second argument are strings. + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + __ TailCallStub(&string_add_stub); + + __ bind(&string1_smi2); + // First argument is a string, second is a smi. Try to lookup the number + // string for the smi in the number string cache. + NumberToStringStub::GenerateLookupNumberStringCache( + masm, r0, r2, r4, r5, r6, true, &string1); + + // Replace second argument on stack and tailcall string add stub to make + // the result. + __ str(r2, MemOperand(sp, 0)); + __ TailCallStub(&string_add_stub); + + // Only first argument is a string. + __ bind(&string1); + __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); + + // First argument was not a string, test second. + __ bind(¬_string1); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, ¬_strings); + __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, ¬_strings); + + // Only second argument is a string. + __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); + + __ bind(¬_strings); + } + + __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. } -void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, - Label* non_smi, - Label* slow) { - __ JumpIfNotSmi(r0, non_smi); +// For bitwise ops where the inputs are not both Smis we here try to determine +// whether both inputs are either Smis or at least heap numbers that can be +// represented by a 32 bit signed value. We truncate towards zero as required +// by the ES spec. If this is the case we do the bitwise op and see if the +// result is a Smi. If so, great, otherwise we try to find a heap number to +// write the answer into (either by allocating or by overwriting). +// On entry the operands are in lhs and rhs. On exit the answer is in r0. 
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs) { + Label slow, result_not_a_smi; + Label rhs_is_smi, lhs_is_smi; + Label done_checking_rhs, done_checking_lhs; + + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow); + __ jmp(&done_checking_lhs); + __ bind(&lhs_is_smi); + __ mov(r3, Operand(lhs, ASR, 1)); + __ bind(&done_checking_lhs); + + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow); + __ jmp(&done_checking_rhs); + __ bind(&rhs_is_smi); + __ mov(r2, Operand(rhs, ASR, 1)); + __ bind(&done_checking_rhs); - // The result of negating zero or the smallest negative smi is not a smi. - __ bic(ip, r0, Operand(0x80000000), SetCC); - __ b(eq, slow); + ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); - // Return '0 - value'. - __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); + // r0 and r1: Original operands (Smi or heap numbers). + // r2 and r3: Signed int32 operands. + switch (op_) { + case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; + case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; + case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; + case Token::SAR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, ASR, r2)); + break; + case Token::SHR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSR, r2), SetCC); + // SHR is special because it is required to produce a positive answer. + // The code below for writing into heap numbers isn't capable of writing + // the register as an unsigned int so we go to slow case if we hit this + // case. + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, &result_not_a_smi); + } else { + __ b(mi, &slow); + } + break; + case Token::SHL: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSL, r2)); + break; + default: UNREACHABLE(); + } + // check that the *signed* result fits in a smi + __ add(r3, r2, Operand(0x40000000), SetCC); + __ b(mi, &result_not_a_smi); + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); __ Ret(); -} + Label have_to_allocate, got_a_heap_number; + __ bind(&result_not_a_smi); + switch (mode_) { + case OVERWRITE_RIGHT: { + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &have_to_allocate); + __ mov(r5, Operand(rhs)); + break; + } + case OVERWRITE_LEFT: { + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &have_to_allocate); + __ mov(r5, Operand(lhs)); + break; + } + case NO_OVERWRITE: { + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + default: break; + } + __ bind(&got_a_heap_number); + // r2: Answer as signed int32. + // r5: Heap number to write answer into. -void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, - Label* non_smi) { - __ JumpIfNotSmi(r0, non_smi); + // Nothing can go wrong now, so move the heap number to r0, which is the + // result. 
+ __ mov(r0, Operand(r5)); - // Flip bits and revert inverted smi-tag. - __ mvn(r0, Operand(r0)); - __ bic(r0, r0, Operand(kSmiTagMask)); - __ Ret(); -} + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r2); + if (op_ == Token::SHR) { + __ vcvt_f64_u32(d0, s0); + } else { + __ vcvt_f64_s32(d0, s0); + } + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); + __ Ret(); + } else { + // Tail call that writes the int32 in r2 to the heap number in r0, using + // r3 as scratch. r0 is preserved and returned. + WriteInt32ToHeapNumberStub stub(r2, r0, r3); + __ TailCallStub(&stub); + } + if (mode_ != NO_OVERWRITE) { + __ bind(&have_to_allocate); + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + __ jmp(&got_a_heap_number); + } -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { + // If all else failed then we go to the runtime system. + __ bind(&slow); + __ Push(lhs, rhs); // Restore stack. switch (op_) { - case Token::SUB: - GenerateHeapNumberStubSub(masm); + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); break; - case Token::BIT_NOT: - GenerateHeapNumberStubBitNot(masm); + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_JS); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_JS); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break; default: UNREACHABLE(); @@ -1815,179 +2252,574 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { - Label non_smi, slow, call_builtin; - GenerateSmiCodeSub(masm, &non_smi, &call_builtin); - __ bind(&non_smi); - GenerateHeapNumberCodeSub(masm, &slow); - __ bind(&slow); - GenerateTypeTransition(masm); - __ bind(&call_builtin); - GenerateGenericCodeFallback(masm); + + +// This function takes the known int in a register for the cases +// where it doesn't know a good trick, and may deliver +// a result that needs shifting. +static void MultiplyByKnownIntInStub( + MacroAssembler* masm, + Register result, + Register source, + Register known_int_register, // Smi tagged. + int known_int, + int* required_shift) { // Including Smi tag shift + switch (known_int) { + case 3: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 1; + break; + case 5: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 1; + break; + case 6: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 2; + break; + case 7: + __ rsb(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 9: + __ add(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 10: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 2; + break; + default: + ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. 
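// A hedged aside (not part of the stub): the SHR case above converts with
// vcvt_f64_u32 rather than vcvt_f64_s32 because JavaScript's >>> always
// yields a value in [0, 2^32), so the 32-bit result must be read as unsigned
// before it becomes a double; a result with the top bit set cannot be a Smi,
// which is why SHR alone branches away on 'mi'.  A plain C++ sketch of the
// same rule:
#include <cstdint>

double ShiftRightLogicalAsNumber(int32_t lhs, int32_t count) {
  uint32_t bits = static_cast<uint32_t>(lhs) >> (count & 0x1f);  // only low 5 bits of count
  return static_cast<double>(bits);  // unsigned: e.g. -1 >>> 0 is 4294967295, not -1
}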
+ __ mul(result, source, known_int_register); + *required_shift = 0; + } } -void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeBitNot(masm, &non_smi); - __ bind(&non_smi); - GenerateHeapNumberCodeBitNot(masm, &slow); - __ bind(&slow); - GenerateTypeTransition(masm); +// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 +// trick. See http://en.wikipedia.org/wiki/Divisibility_rule +// Takes the sum of the digits base (mask + 1) repeatedly until we have a +// number from 0 to mask. On exit the 'eq' condition flags are set if the +// answer is exactly the mask. +void IntegerModStub::DigitSum(MacroAssembler* masm, + Register lhs, + int mask, + int shift, + Label* entry) { + ASSERT(mask > 0); + ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. + Label loop; + __ bind(&loop); + __ and_(ip, lhs, Operand(mask)); + __ add(lhs, ip, Operand(lhs, LSR, shift)); + __ bind(entry); + __ cmp(lhs, Operand(mask)); + __ b(gt, &loop); } -void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, - Label* slow) { - EmitCheckForHeapNumber(masm, r0, r1, r6, slow); - // r0 is a heap number. Get a new heap number in r1. - if (mode_ == UNARY_OVERWRITE) { - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - } else { - Label slow_allocate_heapnumber, heapnumber_allocated; - __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber); - __ jmp(&heapnumber_allocated); - __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r1, Operand(r0)); - __ pop(r0); - __ LeaveInternalFrame(); +void IntegerModStub::DigitSum(MacroAssembler* masm, + Register lhs, + Register scratch, + int mask, + int shift1, + int shift2, + Label* entry) { + ASSERT(mask > 0); + ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. + Label loop; + __ bind(&loop); + __ bic(scratch, lhs, Operand(mask)); + __ and_(ip, lhs, Operand(mask)); + __ add(lhs, ip, Operand(lhs, LSR, shift1)); + __ add(lhs, lhs, Operand(scratch, LSR, shift2)); + __ bind(entry); + __ cmp(lhs, Operand(mask)); + __ b(gt, &loop); +} + - __ bind(&heapnumber_allocated); - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); - __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); - __ mov(r0, Operand(r1)); +// Splits the number into two halves (bottom half has shift bits). The top +// half is subtracted from the bottom half. If the result is negative then +// rhs is added. 
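// A standalone sketch (assumptions mine, untagged values) of the shift-and-add
// strength reduction MultiplyByKnownIntInStub performs: constants 3..10 are
// built from a single add/rsb, and the stub defers any remaining power-of-two
// factor (plus the Smi tag shift) to its caller via *required_shift. Here the
// whole product is simply returned.
#include <cstdint>

uint32_t TimesKnownInt(uint32_t x, int known_int) {
  switch (known_int) {
    case 3:  return x + (x << 1);            // add x, x LSL #1
    case 5:  return x + (x << 2);            // add x, x LSL #2
    case 6:  return (x + (x << 1)) << 1;     // (x*3) << 1
    case 7:  return (x << 3) - x;            // rsb: x*8 - x
    case 9:  return x + (x << 3);            // add x, x LSL #3
    case 10: return (x + (x << 2)) << 1;     // (x*5) << 1
    default: return x * static_cast<uint32_t>(known_int);  // general 'mul' fallback
  }
}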
+void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, + Register lhs, + int shift, + int rhs) { + int mask = (1 << shift) - 1; + __ and_(ip, lhs, Operand(mask)); + __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); + __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); +} + + +void IntegerModStub::ModReduce(MacroAssembler* masm, + Register lhs, + int max, + int denominator) { + int limit = denominator; + while (limit * 2 <= max) limit *= 2; + while (limit >= denominator) { + __ cmp(lhs, Operand(limit)); + __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); + limit >>= 1; } +} + + +void IntegerModStub::ModAnswer(MacroAssembler* masm, + Register result, + Register shift_distance, + Register mask_bits, + Register sum_of_digits) { + __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); __ Ret(); } -void UnaryOpStub::GenerateHeapNumberCodeBitNot( - MacroAssembler* masm, Label* slow) { - Label impossible; +// See comment for class. +void IntegerModStub::Generate(MacroAssembler* masm) { + __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); + __ bic(odd_number_, odd_number_, Operand(1)); + __ mov(odd_number_, Operand(odd_number_, LSL, 1)); + // We now have (odd_number_ - 1) * 2 in the register. + // Build a switch out of branches instead of data because it avoids + // having to teach the assembler about intra-code-object pointers + // that are not in relative branch instructions. + Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; + Label mod21, mod23, mod25; + { Assembler::BlockConstPoolScope block_const_pool(masm); + __ add(pc, pc, Operand(odd_number_)); + // When you read pc it is always 8 ahead, but when you write it you always + // write the actual value. So we put in two nops to take up the slack. + __ nop(); + __ nop(); + __ b(&mod3); + __ b(&mod5); + __ b(&mod7); + __ b(&mod9); + __ b(&mod11); + __ b(&mod13); + __ b(&mod15); + __ b(&mod17); + __ b(&mod19); + __ b(&mod21); + __ b(&mod23); + __ b(&mod25); + } - EmitCheckForHeapNumber(masm, r0, r1, r6, slow); - // Convert the heap number is r0 to an untagged integer in r1. - __ ConvertToInt32(r0, r1, r2, r3, d0, slow); + // For each denominator we find a multiple that is almost only ones + // when expressed in binary. Then we do the sum-of-digits trick for + // that number. If the multiple is not 1 then we have to do a little + // more work afterwards to get the answer into the 0-denominator-1 + // range. + DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11. + __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111. + ModGetInRangeBySubtraction(masm, lhs_, 2, 5); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111. + __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111. + ModGetInRangeBySubtraction(masm, lhs_, 3, 9); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111. + ModReduce(masm, lhs_, 0x3f, 11); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111. + ModReduce(masm, lhs_, 0xff, 13); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111. 
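// A plain C++ sketch (mine, hedged) of the digit-sum trick the IntegerModStub
// routines above implement: summing the base-(mask+1) digits of n preserves
// n modulo every divisor of mask, so a few masked adds reduce n to a small
// range from which the exact remainder is recovered.
#include <cstdint>

uint32_t DigitSum(uint32_t n, uint32_t mask, int shift) {  // requires mask == (1u << shift) - 1
  while (n > mask) n = (n & mask) + (n >> shift);          // strictly decreasing while n > mask
  return n;                                                // in [0, mask], congruent to n mod mask
}

uint32_t Mod3(uint32_t n) {
  uint32_t s = DigitSum(n, 3, 2);            // base 4; 4 - 1 == 3
  return s == 3 ? 0 : s;                     // the conditional 'sub ..., eq' step above
}

uint32_t Mod5(uint32_t n) {
  uint32_t s = DigitSum(n, 0xf, 4);          // base 16; 15 == 5 * 3, so s == n (mod 5)
  int32_t r = static_cast<int32_t>(s & 3) - static_cast<int32_t>(s >> 2);  // 4 == -1 (mod 5)
  if (r < 0) r += 5;                         // ModGetInRangeBySubtraction(..., 2, 5)
  return static_cast<uint32_t>(r);
}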
+ __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111. + ModGetInRangeBySubtraction(masm, lhs_, 4, 17); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111. + ModReduce(masm, lhs_, 0xff, 19); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111. + ModReduce(masm, lhs_, 0x3f, 21); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101. + ModReduce(masm, lhs_, 0xff, 23); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101. + ModReduce(masm, lhs_, 0x7f, 25); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); +} - // Do the bitwise operation and check if the result fits in a smi. - Label try_float; - __ mvn(r1, Operand(r1)); - __ add(r2, r1, Operand(0x40000000), SetCC); - __ b(mi, &try_float); - // Tag the result as a smi and we're done. - __ mov(r0, Operand(r1, LSL, kSmiTagSize)); - __ Ret(); +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { + // lhs_ : x + // rhs_ : y + // r0 : result - // Try to store the result in a heap number. - __ bind(&try_float); - if (mode_ == UNARY_NO_OVERWRITE) { - Label slow_allocate_heapnumber, heapnumber_allocated; - // Allocate a new heap number without zapping r0, which we need if it fails. - __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber); - __ jmp(&heapnumber_allocated); + Register result = r0; + Register lhs = lhs_; + Register rhs = rhs_; - __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); - __ LeaveInternalFrame(); + // This code can't cope with other register allocations yet. + ASSERT(result.is(r0) && + ((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0)))); - // Convert the heap number in r0 to an untagged integer in r1. - // This can't go slow-case because it's the same number we already - // converted once again. - __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible); - __ mvn(r1, Operand(r1)); + Register smi_test_reg = r7; + Register scratch = r9; - __ bind(&heapnumber_allocated); - __ mov(r0, r2); // Move newly allocated heap number to r0. + // All ops need to know whether we are dealing with two Smis. Set up + // smi_test_reg to tell us that. + if (ShouldGenerateSmiCode()) { + __ orr(smi_test_reg, lhs, Operand(rhs)); } - if (CpuFeatures::IsSupported(VFP3)) { - // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r1); - __ vcvt_f64_s32(d0, s0); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r2, HeapNumber::kValueOffset); - __ Ret(); - } else { - // WriteInt32ToHeapNumberStub does not trigger GC, so we do not - // have to set up a frame. - WriteInt32ToHeapNumberStub stub(r1, r0, r2); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); - } + switch (op_) { + case Token::ADD: { + Label not_smi; + // Fast path. + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. 
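// Hedged sketch of the combined Smi test set up in smi_test_reg above: with
// kSmiTag == 0 and a one-bit tag, OR-ing two tagged words leaves the tag bit
// clear only when both operands are Smis, so one orr plus one tst covers both
// operands at once.
#include <cstdint>

constexpr int32_t kSmiTagMaskSketch = 1;  // one-bit tag, assumed as on this port

inline bool IsSmi(int32_t tagged)         { return (tagged & kSmiTagMaskSketch) == 0; }
inline bool BothSmi(int32_t a, int32_t b) { return ((a | b) & kSmiTagMaskSketch) == 0; }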
+ __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r0, Operand(r1)); // Revert optimistic add. + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); + break; + } - __ bind(&impossible); - if (FLAG_debug_code) { - __ stop("Incorrect assumption in bit-not stub"); - } -} + case Token::SUB: { + Label not_smi; + // Fast path. + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + if (lhs.is(r1)) { + __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. + } else { + __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. + } + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); + break; + } + case Token::MUL: { + Label not_smi, slow; + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ b(ne, ¬_smi); + // Remove tag from one operand (but keep sign), so that result is Smi. + __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); + // Do multiplication + // scratch = lower 32 bits of ip * lhs. + __ smull(scratch, scratch2, lhs, ip); + // Go slow on overflows (overflow bit is not set). + __ mov(ip, Operand(scratch, ASR, 31)); + // No overflow if higher 33 bits are identical. + __ cmp(ip, Operand(scratch2)); + __ b(ne, &slow); + // Go slow on zero result to handle -0. + __ tst(scratch, Operand(scratch)); + __ mov(result, Operand(scratch), LeaveCC, ne); + __ Ret(ne); + // We need -0 if we were multiplying a negative number with 0 to get 0. + // We know one of them was zero. + __ add(scratch2, rhs, Operand(lhs), SetCC); + __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ Ret(pl); // Return Smi 0 if the non-zero one was positive. + // Slow case. We fall through here if we multiplied a negative number + // with 0, because that would mean we should produce -0. + __ bind(&slow); + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); + break; + } -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { - switch (op_) { - case Token::SUB: - GenerateGenericStubSub(masm); + case Token::DIV: + case Token::MOD: { + Label not_smi; + if (ShouldGenerateSmiCode() && specialized_on_rhs_) { + Label lhs_is_unsuitable; + __ JumpIfNotSmi(lhs, ¬_smi); + if (IsPowerOf2(constant_rhs_)) { + if (op_ == Token::MOD) { + __ and_(rhs, + lhs, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), + SetCC); + // We now have the answer, but if the input was negative we also + // have the sign bit. Our work is done if the result is + // positive or zero: + if (!rhs.is(r0)) { + __ mov(r0, rhs, LeaveCC, pl); + } + __ Ret(pl); + // A mod of a negative left hand side must return a negative number. + // Unfortunately if the answer is 0 then we must return -0. And we + // already optimistically trashed rhs so we may need to restore it. + __ eor(rhs, rhs, Operand(0x80000000u), SetCC); + // Next two instructions are conditional on the answer being -0. 
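// Hedged sketch (not the stub) of the MUL fast-path overflow test above: after
// smull, the product overflows signed 32 bits exactly when its high word is
// not the sign-extension of the low word ('mov ip, scratch, ASR #31; cmp').
// The Smi path also bails out when the product is 0 and an operand was
// negative, because -0 has no Smi representation.
#include <cstdint>

bool SmiMulNoOverflow(int32_t a, int32_t b, int32_t* result) {
  int64_t full = static_cast<int64_t>(a) * b;      // what smull computes
  int32_t lo = static_cast<int32_t>(full);
  int32_t hi = static_cast<int32_t>(full >> 32);
  if (hi != (lo >> 31)) return false;              // arithmetic shift (ARM ASR): upper 33 bits differ
  if (lo == 0 && (a < 0 || b < 0)) return false;   // would need -0: take the slow path
  *result = lo;
  return true;
}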
+ __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); + __ b(eq, &lhs_is_unsuitable); + // We need to subtract the dividend. Eg. -3 % 4 == -3. + __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); + } else { + ASSERT(op_ == Token::DIV); + __ tst(lhs, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); + __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. + int shift = 0; + int d = constant_rhs_; + while ((d & 1) == 0) { + d >>= 1; + shift++; + } + __ mov(r0, Operand(lhs, LSR, shift)); + __ bic(r0, r0, Operand(kSmiTagMask)); + } + } else { + // Not a power of 2. + __ tst(lhs, Operand(0x80000000u)); + __ b(ne, &lhs_is_unsuitable); + // Find a fixed point reciprocal of the divisor so we can divide by + // multiplying. + double divisor = 1.0 / constant_rhs_; + int shift = 32; + double scale = 4294967296.0; // 1 << 32. + uint32_t mul; + // Maximise the precision of the fixed point reciprocal. + while (true) { + mul = static_cast<uint32_t>(scale * divisor); + if (mul >= 0x7fffffff) break; + scale *= 2.0; + shift++; + } + mul++; + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ mov(scratch2, Operand(mul)); + __ umull(scratch, scratch2, scratch2, lhs); + __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); + // scratch2 is lhs / rhs. scratch2 is not Smi tagged. + // rhs is still the known rhs. rhs is Smi tagged. + // lhs is still the unkown lhs. lhs is Smi tagged. + int required_scratch_shift = 0; // Including the Smi tag shift of 1. + // scratch = scratch2 * rhs. + MultiplyByKnownIntInStub(masm, + scratch, + scratch2, + rhs, + constant_rhs_, + &required_scratch_shift); + // scratch << required_scratch_shift is now the Smi tagged rhs * + // (lhs / rhs) where / indicates integer division. + if (op_ == Token::DIV) { + __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); + __ b(ne, &lhs_is_unsuitable); // There was a remainder. + __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); + } else { + ASSERT(op_ == Token::MOD); + __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); + } + } + __ Ret(); + __ bind(&lhs_is_unsuitable); + } else if (op_ == Token::MOD && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS) { + // Do generate a bit of smi code for modulus even though the default for + // modulus is not to do it, but as the ARM processor has no coprocessor + // support for modulus checking for smis makes sense. We can handle + // 1 to 25 times any power of 2. This covers over half the numbers from + // 1 to 100 including all of the first 25. (Actually the constants < 10 + // are handled above by reciprocal multiplication. We only get here for + // those cases if the right hand side is not a constant or for cases + // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod + // stub.) + Label slow; + Label not_power_of_2; + ASSERT(!ShouldGenerateSmiCode()); + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + // Check for two positive smis. + __ orr(smi_test_reg, lhs, Operand(rhs)); + __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); + __ b(ne, &slow); + // Check that rhs is a power of two and not zero. + Register mask_bits = r3; + __ sub(scratch, rhs, Operand(1), SetCC); + __ b(mi, &slow); + __ and_(mask_bits, rhs, Operand(scratch), SetCC); + __ b(ne, ¬_power_of_2); + // Calculate power of two modulus. 
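// Hedged sketch (not the stub itself) of dividing by a known constant with a
// fixed-point reciprocal, as in the non-power-of-2 branch above: pick
// mul ~= 2^shift / d with as much precision as fits below 2^31, take the high
// part of a 32x32->64 multiply, and shift. The stub works on Smi-tagged
// values, so its shift amounts differ by the one tag bit, and for DIV it
// verifies exactness by multiplying back.
#include <cstdint>

struct Reciprocal { uint32_t mul; int shift; };

Reciprocal MakeReciprocal(int d) {             // d: small positive, non-power-of-2 divisor
  double divisor = 1.0 / d;
  int shift = 32;
  double scale = 4294967296.0;                 // 2^32
  uint32_t mul;
  while (true) {                               // maximise precision of the reciprocal
    mul = static_cast<uint32_t>(scale * divisor);
    if (mul >= 0x7fffffff) break;
    scale *= 2.0;
    shift++;
  }
  return { mul + 1, shift };
}

uint32_t DivByKnownInt(uint32_t lhs, Reciprocal r) {       // lhs < 2^31
  uint64_t product = static_cast<uint64_t>(lhs) * r.mul;   // umull
  return static_cast<uint32_t>(product >> r.shift);        // ~= lhs / d; stub checks exactness
}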
+ __ and_(result, lhs, Operand(scratch)); + __ Ret(); + + __ bind(¬_power_of_2); + __ eor(scratch, scratch, Operand(mask_bits)); + // At least two bits are set in the modulus. The high one(s) are in + // mask_bits and the low one is scratch + 1. + __ and_(mask_bits, scratch, Operand(lhs)); + Register shift_distance = scratch; + scratch = no_reg; + + // The rhs consists of a power of 2 multiplied by some odd number. + // The power-of-2 part we handle by putting the corresponding bits + // from the lhs in the mask_bits register, and the power in the + // shift_distance register. Shift distance is never 0 due to Smi + // tagging. + __ CountLeadingZeros(r4, shift_distance, shift_distance); + __ rsb(shift_distance, r4, Operand(32)); + + // Now we need to find out what the odd number is. The last bit is + // always 1. + Register odd_number = r4; + __ mov(odd_number, Operand(rhs, LSR, shift_distance)); + __ cmp(odd_number, Operand(25)); + __ b(gt, &slow); + + IntegerModStub stub( + result, shift_distance, odd_number, mask_bits, lhs, r5); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. + + __ bind(&slow); + } + HandleBinaryOpSlowCases( + masm, + ¬_smi, + lhs, + rhs, + op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); break; - case Token::BIT_NOT: - GenerateGenericStubBitNot(masm); + } + + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + Label slow; + STATIC_ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, &slow); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + switch (op_) { + case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; + case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; + case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; + case Token::SAR: + // Remove tags from right operand. + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(result, Operand(lhs, ASR, scratch2)); + // Smi tag result. + __ bic(result, result, Operand(kSmiTagMask)); + break; + case Token::SHR: + // Remove tags from operands. We can't do this on a 31 bit number + // because then the 0s get shifted into bit 30 instead of bit 31. + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSR, scratch2)); + // Unsigned shift is not allowed to produce a negative number, so + // check the sign bit and the sign bit after Smi tagging. + __ tst(scratch, Operand(0xc0000000)); + __ b(ne, &slow); + // Smi tag result. + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); + break; + case Token::SHL: + // Remove tags from operands. + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSL, scratch2)); + // Check that the signed result fits in a Smi. + __ add(scratch2, scratch, Operand(0x40000000), SetCC); + __ b(mi, &slow); + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); + break; + default: UNREACHABLE(); + } + __ Ret(); + __ bind(&slow); + HandleNonSmiBitwiseOp(masm, lhs, rhs); break; - default: - UNREACHABLE(); + } + + default: UNREACHABLE(); + } + // This code should be unreachable. + __ stop("Unreachable"); + + // Generate an unreachable reference to the DEFAULT stub so that it can be + // found at the end of this stub when clearing ICs at GC. + // TODO(kaznacheev): Check performance impact and get rid of this. 
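// Hedged sketch of the decomposition used above: a (nonzero) Smi-tagged rhs is
// split as (odd number) << shift, and lhs mod rhs is rebuilt from the low
// 'shift' bits of lhs plus (lhs >> shift) mod odd, which IntegerModStub then
// computes with the digit-sum trick for odd values 3..25. __builtin_ctz is a
// GCC/Clang builtin standing in for the CountLeadingZeros/rsb pair.
#include <cstdint>

uint32_t ModByOddTimesPowerOf2(uint32_t lhs, uint32_t rhs) {  // rhs != 0
  int shift = __builtin_ctz(rhs);               // position of the lowest set bit
  uint32_t odd = rhs >> shift;                  // odd factor of rhs
  uint32_t mask_bits = lhs & ((1u << shift) - 1);
  uint32_t high_mod = (lhs >> shift) % odd;     // stub: digit-sum instead of '%'
  return mask_bits + (high_mod << shift);       // == lhs % rhs
}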
+ if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { + GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); + __ CallStub(&uninit); } } -void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeSub(masm, &non_smi, &slow); - __ bind(&non_smi); - GenerateHeapNumberCodeSub(masm, &slow); - __ bind(&slow); - GenerateGenericCodeFallback(masm); +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + Label get_result; + + __ Push(r1, r0); + + __ mov(r2, Operand(Smi::FromInt(MinorKey()))); + __ mov(r1, Operand(Smi::FromInt(op_))); + __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); + __ Push(r2, r1, r0); + + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), + 5, + 1); } -void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeBitNot(masm, &non_smi); - __ bind(&non_smi); - GenerateHeapNumberCodeBitNot(masm, &slow); - __ bind(&slow); - GenerateGenericCodeFallback(masm); +Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + GenericBinaryOpStub stub(key, type_info); + return stub.GetCode(); } -void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { - // Handle the slow case by jumping to the JavaScript builtin. - __ push(r0); - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } +Handle<Code> GetTypeRecordingBinaryOpStub(int key, + TRBinaryOpIC::TypeInfo type_info, + TRBinaryOpIC::TypeInfo result_type_info) { + TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); + return stub.GetCode(); } -void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { Label get_result; __ Push(r1, r0); @@ -1998,43 +2830,39 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { __ Push(r2, r1, r0); __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch), - masm->isolate()), + ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), 5, 1); } -void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( +void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( MacroAssembler* masm) { UNIMPLEMENTED(); } -void BinaryOpStub::Generate(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { switch (operands_type_) { - case BinaryOpIC::UNINITIALIZED: + case TRBinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); break; - case BinaryOpIC::SMI: + case TRBinaryOpIC::SMI: GenerateSmiStub(masm); break; - case BinaryOpIC::INT32: + case TRBinaryOpIC::INT32: GenerateInt32Stub(masm); break; - case BinaryOpIC::HEAP_NUMBER: + case TRBinaryOpIC::HEAP_NUMBER: GenerateHeapNumberStub(masm); break; - case BinaryOpIC::ODDBALL: + case TRBinaryOpIC::ODDBALL: GenerateOddballStub(masm); break; - case BinaryOpIC::BOTH_STRING: - GenerateBothStringStub(masm); - break; - case BinaryOpIC::STRING: + case TRBinaryOpIC::STRING: GenerateStringStub(masm); break; - case BinaryOpIC::GENERIC: + case TRBinaryOpIC::GENERIC: GenerateGeneric(masm); break; default: @@ -2043,11 +2871,10 @@ void BinaryOpStub::Generate(MacroAssembler* masm) { } -const char* BinaryOpStub::GetName() { +const char* TypeRecordingBinaryOpStub::GetName() { if (name_ != NULL) return name_; const int kMaxNameLength = 100; - name_ = 
Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); if (name_ == NULL) return "OOM"; const char* op_name = Token::Name(op_); const char* overwrite_name; @@ -2059,15 +2886,16 @@ const char* BinaryOpStub::GetName() { } OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "BinaryOpStub_%s_%s_%s", + "TypeRecordingBinaryOpStub_%s_%s_%s", op_name, overwrite_name, - BinaryOpIC::GetName(operands_type_)); + TRBinaryOpIC::GetName(operands_type_)); return name_; } -void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( + MacroAssembler* masm) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2192,15 +3020,14 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { } -void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required) { +void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, + bool smi_operands, + Label* not_numbers, + Label* gc_required) { Register left = r1; Register right = r0; Register scratch1 = r7; Register scratch2 = r9; - Register scratch3 = r4; ASSERT(smi_operands || (not_numbers != NULL)); if (smi_operands && FLAG_debug_code) { @@ -2220,8 +3047,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 // depending on whether VFP3 is available or not. FloatingPointHelper::Destination destination = - CpuFeatures::IsSupported(VFP3) && - op_ != Token::MOD ? + CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; @@ -2275,9 +3101,6 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, op_, result, scratch1); - if (FLAG_debug_code) { - __ stop("Unreachable code."); - } } break; } @@ -2292,24 +3115,22 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ SmiUntag(r2, right); } else { // Convert operands to 32-bit integers. Right in r2 and left in r3. - FloatingPointHelper::ConvertNumberToInt32(masm, - left, - r3, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - not_numbers); - FloatingPointHelper::ConvertNumberToInt32(masm, - right, - r2, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - not_numbers); + FloatingPointHelper::LoadNumberAsInteger(masm, + left, + r3, + heap_number_map, + scratch1, + scratch2, + d0, + not_numbers); + FloatingPointHelper::LoadNumberAsInteger(masm, + right, + r2, + heap_number_map, + scratch1, + scratch2, + d0, + not_numbers); } Label result_not_a_smi; @@ -2406,9 +3227,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // generated. If the result is not a smi and heap number allocation is not // requested the code falls through. If number allocation is requested but a // heap number cannot be allocated the code jumps to the lable gc_required. -void BinaryOpStub::GenerateSmiCode( - MacroAssembler* masm, - Label* use_runtime, +void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* gc_required, SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { Label not_smis; @@ -2421,7 +3240,8 @@ void BinaryOpStub::GenerateSmiCode( // Perform combined smi check on both operands. 
__ orr(scratch1, left, Operand(right)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfNotSmi(scratch1, ¬_smis); + __ tst(scratch1, Operand(kSmiTagMask)); + __ b(ne, ¬_smis); // If the smi-smi operation results in a smi return is generated. GenerateSmiSmiOperation(masm); @@ -2429,26 +3249,23 @@ void BinaryOpStub::GenerateSmiCode( // If heap number results are possible generate the result in an allocated // heap number. if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { - GenerateFPOperation(masm, true, use_runtime, gc_required); + GenerateFPOperation(masm, true, NULL, gc_required); } __ bind(¬_smis); } -void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { Label not_smis, call_runtime; - if (result_type_ == BinaryOpIC::UNINITIALIZED || - result_type_ == BinaryOpIC::SMI) { + if (result_type_ == TRBinaryOpIC::UNINITIALIZED || + result_type_ == TRBinaryOpIC::SMI) { // Only allow smi results. - GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); + GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. - GenerateSmiCode(masm, - &call_runtime, - &call_runtime, - ALLOW_HEAPNUMBER_RESULTS); + GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); } // Code falls through if the result is not returned as either a smi or heap @@ -2460,48 +3277,18 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::STRING); +void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(operands_type_ == TRBinaryOpIC::STRING); ASSERT(op_ == Token::ADD); // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type. + // TRBinaryOpIC type. GenerateAddStrings(masm); GenerateTypeTransition(masm); } -void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { - Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); - ASSERT(op_ == Token::ADD); - // If both arguments are strings, call the string add stub. - // Otherwise, do a transition. - - // Registers containing left and right operands respectively. - Register left = r1; - Register right = r0; - - // Test if left operand is a string. - __ JumpIfSmi(left, &call_runtime); - __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); - - // Test if right operand is a string. - __ JumpIfSmi(right, &call_runtime); - __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); - - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); - - __ bind(&call_runtime); - GenerateTypeTransition(masm); -} - - -void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::INT32); +void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { + ASSERT(operands_type_ == TRBinaryOpIC::INT32); Register left = r1; Register right = r0; @@ -2534,36 +3321,36 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::MUL: case Token::DIV: case Token::MOD: { - // Load both operands and check that they are 32-bit integer. - // Jump to type transition if they are not. The registers r0 and r1 (right - // and left) are preserved for the runtime call. 
- FloatingPointHelper::Destination destination = - (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD) - ? FloatingPointHelper::kVFPRegisters - : FloatingPointHelper::kCoreRegisters; - - FloatingPointHelper::LoadNumberAsInt32Double(masm, - right, - destination, - d7, - r2, - r3, - heap_number_map, - scratch1, - scratch2, - s0, - &transition); - FloatingPointHelper::LoadNumberAsInt32Double(masm, - left, - destination, - d6, - r4, - r5, - heap_number_map, - scratch1, - scratch2, - s0, - &transition); + // Load both operands and check that they are 32-bit integer. + // Jump to type transition if they are not. The registers r0 and r1 (right + // and left) are preserved for the runtime call. + FloatingPointHelper::Destination destination = + CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? + FloatingPointHelper::kVFPRegisters : + FloatingPointHelper::kCoreRegisters; + + FloatingPointHelper::LoadNumberAsInt32Double(masm, + right, + destination, + d7, + r2, + r3, + heap_number_map, + scratch1, + scratch2, + s0, + &transition); + FloatingPointHelper::LoadNumberAsInt32Double(masm, + left, + destination, + d6, + r4, + r5, + heap_number_map, + scratch1, + scratch2, + s0, + &transition); if (destination == FloatingPointHelper::kVFPRegisters) { CpuFeatures::Scope scope(VFP3); @@ -2597,7 +3384,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { scratch1, scratch2); - if (result_type_ <= BinaryOpIC::INT32) { + if (result_type_ <= TRBinaryOpIC::INT32) { // If the ne condition is set, result does // not fit in a 32-bit integer. __ b(ne, &transition); @@ -2608,27 +3395,14 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ add(scratch2, scratch1, Operand(0x40000000), SetCC); // If not try to return a heap number. __ b(mi, &return_heap_number); - // Check for minus zero. Return heap number for minus zero. - Label not_zero; - __ cmp(scratch1, Operand(0)); - __ b(ne, ¬_zero); - __ vmov(scratch2, d5.high()); - __ tst(scratch2, Operand(HeapNumber::kSignMask)); - __ b(ne, &return_heap_number); - __ bind(¬_zero); - // Tag the result and return. __ SmiTag(r0, scratch1); __ Ret(); - } else { - // DIV just falls through to allocating a heap number. } - __ bind(&return_heap_number); - // Return a heap number, or fall through to type transition or runtime - // call if we can't. - if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER - : BinaryOpIC::INT32)) { + if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER + : TRBinaryOpIC::INT32) { + __ bind(&return_heap_number); // We are using vfp registers so r5 is available. heap_number_result = r5; GenerateHeapResultAllocation(masm, @@ -2668,9 +3442,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Call the C function to handle the double operation. FloatingPointHelper::CallCCodeForDoubleOperation( masm, op_, heap_number_result, scratch1); - if (FLAG_debug_code) { - __ stop("Unreachable code."); - } __ bind(&pop_and_call_runtime); __ Drop(2); @@ -2736,13 +3507,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // The non vfp3 code does not support this special case, so jump to // runtime if we don't support it. if (CpuFeatures::IsSupported(VFP3)) { - __ b(mi, (result_type_ <= BinaryOpIC::INT32) - ? &transition - : &return_heap_number); + __ b(mi, + (result_type_ <= TRBinaryOpIC::INT32) ? &transition + : &return_heap_number); } else { - __ b(mi, (result_type_ <= BinaryOpIC::INT32) - ? &transition - : &call_runtime); + __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? 
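// Hedged aside on the minus-zero check in the newer code removed above: an
// integer-valued double that truncates to 0 can still be -0.0, which is not
// representable as a Smi, so the sign bit of the high word (HeapNumber's
// kSignMask) is tested before tagging the result.
#include <cstdint>
#include <cstring>

bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  const uint64_t kSignBit = 1ull << 63;   // analogue of HeapNumber::kSignMask on the high word
  return bits == kSignBit;                // sign bit set, all other bits zero
}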
&transition + : &call_runtime); } break; case Token::SHL: @@ -2762,16 +3532,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ Ret(); __ bind(&return_heap_number); - heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); - if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); + heap_number_result = r5; + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); + if (op_ != Token::SHR) { // Convert the result to a floating point value. __ vmov(double_scratch.low(), r2); @@ -2790,7 +3560,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } else { // Tail call that writes the int32 in r2 to the heap number in r0, using // r3 as scratch. r0 is preserved and returned. - __ mov(r0, r5); WriteInt32ToHeapNumberStub stub(r2, r0, r3); __ TailCallStub(&stub); } @@ -2802,11 +3571,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { UNREACHABLE(); } - // We never expect DIV to yield an integer result, so we always generate - // type transition code for DIV operations expecting an integer result: the - // code will fall through to this type transition. - if (transition.is_linked() || - ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { + if (transition.is_linked()) { __ bind(&transition); GenerateTypeTransition(masm); } @@ -2816,7 +3581,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } -void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { Label call_runtime; if (op_ == Token::ADD) { @@ -2827,7 +3592,8 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { // Convert oddball arguments to numbers. 
Label check, done; - __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r1, ip); __ b(ne, &check); if (Token::IsBitOp(op_)) { __ mov(r1, Operand(Smi::FromInt(0))); @@ -2836,7 +3602,8 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { } __ jmp(&done); __ bind(&check); - __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r0, ip); __ b(ne, &done); if (Token::IsBitOp(op_)) { __ mov(r0, Operand(Smi::FromInt(0))); @@ -2849,19 +3616,22 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { } -void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - Label call_runtime; - GenerateFPOperation(masm, false, &call_runtime, &call_runtime); +void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { + Label not_numbers, call_runtime; + GenerateFPOperation(masm, false, ¬_numbers, &call_runtime); + + __ bind(¬_numbers); + GenerateTypeTransition(masm); __ bind(&call_runtime); GenerateCallRuntime(masm); } -void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { Label call_runtime, call_string_add_or_runtime; - GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); @@ -2875,7 +3645,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } -void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { ASSERT(op_ == Token::ADD); Label left_not_string, call_runtime; @@ -2906,41 +3676,41 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { GenerateRegisterArgsPush(masm); switch (op_) { case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::ADD, JUMP_JS); break; case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SUB, JUMP_JS); break; case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::MUL, JUMP_JS); break; case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::DIV, JUMP_JS); break; case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::MOD, JUMP_JS); break; case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); break; case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); break; case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); break; case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break; case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break; case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break; default: UNREACHABLE(); @@ -2948,12 +3718,14 @@ void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { } -void 
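// Hedged sketch of the oddball handling above: per ECMAScript, 'undefined'
// coerces to NaN for arithmetic, but ToInt32/ToUint32 of NaN is 0, so the
// bitwise operators can substitute Smi 0 directly while +, -, *, / substitute
// the NaN heap number (the nan_value root loaded above).
#include <cmath>
#include <cstdint>

double  UndefinedToNumber() { return std::nan(""); }  // ToNumber(undefined) is NaN
int32_t UndefinedToInt32()  { return 0; }             // ToInt32(NaN) is 0, hence Smi 0 above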
BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required) { +void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( + MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required) { + // Code below will scratch result if allocation fails. To keep both arguments // intact for the runtime call result cannot be one of these. ASSERT(!result.is(r0) && !result.is(r1)); @@ -2980,53 +3752,38 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, } -void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { +void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { __ Push(r1, r0); } void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Untagged case: double input in d2, double result goes - // into d2. - // Tagged case: tagged input on top of stack and in r0, - // tagged result (heap number) goes into r0. - + // Argument is a number and is on stack and in r0. + Label runtime_call; Label input_not_smi; Label loaded; - Label calculate; - Label invalid_cache; - const Register scratch0 = r9; - const Register scratch1 = r7; - const Register cache_entry = r0; - const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(VFP3)) { + // Load argument and check if it is a smi. + __ JumpIfNotSmi(r0, &input_not_smi); + CpuFeatures::Scope scope(VFP3); - if (tagged) { - // Argument is a number and is on stack and in r0. - // Load argument and check if it is a smi. - __ JumpIfNotSmi(r0, &input_not_smi); - - // Input is a smi. Convert to double and load the low and high words - // of the double into r2, r3. - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); - __ b(&loaded); - - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ CheckMap(r0, - r1, - Heap::kHeapNumberMapRootIndex, - &calculate, - DONT_DO_SMI_CHECK); - // Input is a HeapNumber. Load it to a double register and store the - // low and high words into r2, r3. - __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ vmov(r2, r3, d0); - } else { - // Input is untagged double in d2. Output goes to d2. - __ vmov(r2, r3, d2); - } + // Input is a smi. Convert to double and load the low and high words + // of the double into r2, r3. + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r0, + r1, + Heap::kHeapNumberMapRootIndex, + &runtime_call, + true); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r2, r3. + __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ bind(&loaded); // r2 = low 32 bits of double value // r3 = high 32 bits of double value @@ -3035,28 +3792,24 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ eor(r1, r2, Operand(r3)); __ eor(r1, r1, Operand(r1, ASR, 16)); __ eor(r1, r1, Operand(r1, ASR, 8)); - ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); - __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); + ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); + __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); // r2 = low 32 bits of double value. // r3 = high 32 bits of double value. // r1 = TranscendentalCache::hash(double value). 
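// Hedged sketch (mine) of the cache index computed above: xor the two 32-bit
// halves of the input double, fold the upper bits down twice with arithmetic
// shifts (matching ASR; '>>' on int32_t is arithmetic on the targets in
// question), then mask with the power-of-two cache size.
#include <cstdint>
#include <cstring>

uint32_t TranscendentalCacheHash(double input, uint32_t cache_size) {  // cache_size: power of 2
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  int32_t h = static_cast<int32_t>(bits) ^ static_cast<int32_t>(bits >> 32);
  h ^= h >> 16;                                        // eor r1, r1, r1 ASR #16
  h ^= h >> 8;                                         // eor r1, r1, r1 ASR #8
  return static_cast<uint32_t>(h) & (cache_size - 1);  // And with kCacheSize - 1
}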
- Isolate* isolate = masm->isolate(); - ExternalReference cache_array = - ExternalReference::transcendental_cache_array_address(isolate); - __ mov(cache_entry, Operand(cache_array)); - // cache_entry points to cache array. - int cache_array_index - = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); - __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); + __ mov(r0, + Operand(ExternalReference::transcendental_cache_array_address())); + // r0 points to cache array. + __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); // r0 points to the cache for the type type_. // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); - __ b(eq, &invalid_cache); + __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ b(eq, &runtime_call); #ifdef DEBUG // Check that the layout of cache elements match expectations. - { TranscendentalCache::SubCache::Element test_elem[2]; + { TranscendentalCache::Element test_elem[2]; char* elem_start = reinterpret_cast<char*>(&test_elem[0]); char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); @@ -3071,120 +3824,21 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. __ add(r1, r1, Operand(r1, LSL, 1)); - __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); + __ add(r0, r0, Operand(r1, LSL, 2)); // Check if cache matches: Double value is stored in uint32_t[2] array. - __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); + __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); __ cmp(r2, r4); - __ b(ne, &calculate); + __ b(ne, &runtime_call); __ cmp(r3, r5); - __ b(ne, &calculate); - // Cache hit. Load result, cleanup and return. - if (tagged) { - // Pop input value from stack and load result into r0. - __ pop(); - __ mov(r0, Operand(r6)); - } else { - // Load result into d2. - __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); - } - __ Ret(); - } // if (CpuFeatures::IsSupported(VFP3)) - - __ bind(&calculate); - if (tagged) { - __ bind(&invalid_cache); - ExternalReference runtime_function = - ExternalReference(RuntimeFunction(), masm->isolate()); - __ TailCallExternalReference(runtime_function, 1, 1); - } else { - if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE(); - CpuFeatures::Scope scope(VFP3); - - Label no_update; - Label skip_cache; - const Register heap_number_map = r5; - - // Call C function to calculate the result and update the cache. - // Register r0 holds precalculated cache entry address; preserve - // it on the stack and pop it into register cache_entry after the - // call. - __ push(cache_entry); - GenerateCallCFunction(masm, scratch0); - __ GetCFunctionDoubleResult(d2); - - // Try to update the cache. If we cannot allocate a - // heap number, we return the result without updating. - __ pop(cache_entry); - __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); - __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); - __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); - __ Ret(); - - __ bind(&invalid_cache); - // The cache is invalid. Call runtime which will recreate the - // cache. 
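// Hedged sketch of the '&r0[r1*12]' address computation above: each cache
// element on this 32-bit port is two 32-bit input words plus an output
// pointer (12 bytes, the layout the DEBUG block asserts), and index*12 is
// strength-reduced to (i + 2*i) * 4 with one add and two shifts.
#include <cstdint>

struct Element { uint32_t in[2]; void* output; };  // assumed layout, 12 bytes on 32-bit

inline Element* CacheEntryAt(Element* base, uint32_t i) {
  uint32_t byte_offset = (i + (i << 1)) << 2;      // add r1, r1, r1 LSL #1; then LSL #2
  return reinterpret_cast<Element*>(
      reinterpret_cast<char*>(base) + byte_offset);  // same as &base[i] when sizeof(Element) == 12
}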
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); - __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); - __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ Ret(); - - __ bind(&skip_cache); - // Call C function to calculate the result and answer directly - // without updating the cache. - GenerateCallCFunction(masm, scratch0); - __ GetCFunctionDoubleResult(d2); - __ bind(&no_update); - - // We return the value in d2 without adding it to the cache, but - // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ mov(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + __ b(ne, &runtime_call); + // Cache hit. Load result, pop argument and return. + __ mov(r0, Operand(r6)); + __ pop(); __ Ret(); } -} - -void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, - Register scratch) { - Isolate* isolate = masm->isolate(); - - __ push(lr); - __ PrepareCallCFunction(0, 1, scratch); - if (masm->use_eabi_hardfloat()) { - __ vmov(d0, d2); - } else { - __ vmov(r0, r1, d2); - } - switch (type_) { - case TranscendentalCache::SIN: - __ CallCFunction(ExternalReference::math_sin_double_function(isolate), - 0, 1); - break; - case TranscendentalCache::COS: - __ CallCFunction(ExternalReference::math_cos_double_function(isolate), - 0, 1); - break; - case TranscendentalCache::LOG: - __ CallCFunction(ExternalReference::math_log_double_function(isolate), - 0, 1); - break; - default: - UNIMPLEMENTED(); - break; - } - __ pop(lr); + __ bind(&runtime_call); + __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); } @@ -3206,110 +3860,138 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } -void MathPowStub::Generate(MacroAssembler* masm) { - Label call_runtime; +void GenericUnaryOpStub::Generate(MacroAssembler* masm) { + Label slow, done; - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - Label base_not_smi; - Label exponent_not_smi; - Label convert_exponent; - - const Register base = r0; - const Register exponent = r1; - const Register heapnumbermap = r5; - const Register heapnumber = r6; - const DoubleRegister double_base = d0; - const DoubleRegister double_exponent = d1; - const DoubleRegister double_result = d2; - const SwVfpRegister single_scratch = s0; - const Register scratch = r9; - const Register scratch2 = r7; - - __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); - __ ldr(base, MemOperand(sp, 1 * kPointerSize)); - __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); - - // Convert base to double value and store it in d0. - __ JumpIfNotSmi(base, &base_not_smi); - // Base is a Smi. Untag and convert it. - __ SmiUntag(base); - __ vmov(single_scratch, base); - __ vcvt_f64_s32(double_base, single_scratch); - __ b(&convert_exponent); - - __ bind(&base_not_smi); - __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); - __ cmp(scratch, heapnumbermap); - __ b(ne, &call_runtime); - // Base is a heapnumber. Load it into double register. 
- __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); - - __ bind(&convert_exponent); - __ JumpIfNotSmi(exponent, &exponent_not_smi); - __ SmiUntag(exponent); - - // The base is in a double register and the exponent is - // an untagged smi. Allocate a heap number and call a - // C function for integer exponents. The register containing - // the heap number is callee-saved. - __ AllocateHeapNumber(heapnumber, - scratch, - scratch2, - heapnumbermap, - &call_runtime); - __ push(lr); - __ PrepareCallCFunction(1, 1, scratch); - __ SetCallCDoubleArguments(double_base, exponent); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), - 1, 1); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - __ vstr(double_result, - FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); - __ mov(r0, heapnumber); - __ Ret(2 * kPointerSize); - - __ bind(&exponent_not_smi); - __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); - __ cmp(scratch, heapnumbermap); - __ b(ne, &call_runtime); - // Exponent is a heapnumber. Load it into double register. - __ vldr(double_exponent, - FieldMemOperand(exponent, HeapNumber::kValueOffset)); - - // The base and the exponent are in double registers. - // Allocate a heap number and call a C function for - // double exponents. The register containing - // the heap number is callee-saved. - __ AllocateHeapNumber(heapnumber, - scratch, - scratch2, - heapnumbermap, - &call_runtime); - __ push(lr); - __ PrepareCallCFunction(0, 2, scratch); - __ SetCallCDoubleArguments(double_base, double_exponent); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), - 0, 2); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); - __ vstr(double_result, - FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); - __ mov(r0, heapnumber); - __ Ret(2 * kPointerSize); - } + if (op_ == Token::SUB) { + if (include_smi_code_) { + // Check whether the value is a smi. + Label try_float; + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &try_float); + + // Go slow case if the value of the expression is zero + // to make sure that we switch between 0 and -0. + if (negative_zero_ == kStrictNegativeZero) { + // If we have to check for zero, then we can check for the max negative + // smi while we are at it. + __ bic(ip, r0, Operand(0x80000000), SetCC); + __ b(eq, &slow); + __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); + __ Ret(); + } else { + // The value of the expression is a smi and 0 is OK for -0. Try + // optimistic subtraction '0 - value'. + __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC); + __ Ret(vc); + // We don't have to reverse the optimistic neg since the only case + // where we fall through is the minimum negative Smi, which is the case + // where the neg leaves the register unchanged. + __ jmp(&slow); // Go slow on max negative Smi. + } + __ bind(&try_float); + } else if (FLAG_debug_code) { + __ tst(r0, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected smi operand."); + } - __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); -} + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); + __ b(ne, &slow); + // r0 is a heap number. Get a new heap number in r1. + if (overwrite_ == UNARY_OVERWRITE) { + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. 
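// Hedged sketch of the two unary-minus corner cases handled above: negating
// Smi 0 must produce -0 (a heap number) and negating the most negative Smi
// overflows, so the single 'bic ip, r0, #0x80000000, SetCC' catches both
// tagged values at once; a heap number, by contrast, is negated by flipping
// only the sign bit of its upper word (HeapNumber::kSignMask).
#include <cstdint>
#include <cstring>

bool SmiNegateFast(int32_t value, int32_t* out) {        // untagged 31-bit Smi payload
  if (value == 0 || value == -(1 << 30)) return false;   // -0 or overflow: take the slow path
  *out = -value;                                         // rsb r0, r0, #0 on the tagged word
  return true;
}

double NegateDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bits ^= 1ull << 63;                                    // flip only the sign bit
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}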
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + } else { + __ AllocateHeapNumber(r1, r2, r3, r6, &slow); + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); + __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. + __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); + __ mov(r0, Operand(r1)); + } + } else if (op_ == Token::BIT_NOT) { + if (include_smi_code_) { + Label non_smi; + __ JumpIfNotSmi(r0, &non_smi); + __ mvn(r0, Operand(r0)); + // Bit-clear inverted smi-tag. + __ bic(r0, r0, Operand(kSmiTagMask)); + __ Ret(); + __ bind(&non_smi); + } else if (FLAG_debug_code) { + __ tst(r0, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected smi operand."); + } + + // Check if the operand is a heap number. + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); + __ b(ne, &slow); + // Convert the heap number is r0 to an untagged integer in r1. + __ ConvertToInt32(r0, r1, r2, r3, d0, &slow); -bool CEntryStub::NeedsImmovableCode() { - return true; + // Do the bitwise operation (move negated) and check if the result + // fits in a smi. + Label try_float; + __ mvn(r1, Operand(r1)); + __ add(r2, r1, Operand(0x40000000), SetCC); + __ b(mi, &try_float); + __ mov(r0, Operand(r1, LSL, kSmiTagSize)); + __ b(&done); + + __ bind(&try_float); + if (!overwrite_ == UNARY_OVERWRITE) { + // Allocate a fresh heap number, but don't overwrite r0 until + // we're sure we can do it without going through the slow case + // that needs the value in r0. + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + __ mov(r0, Operand(r2)); + } + + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r1); + __ vcvt_f64_s32(d0, s0); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r2, HeapNumber::kValueOffset); + } else { + // WriteInt32ToHeapNumberStub does not trigger GC, so we do not + // have to set up a frame. + WriteInt32ToHeapNumberStub stub(r1, r0, r2); + __ push(lr); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + } else { + UNIMPLEMENTED(); + } + + __ bind(&done); + __ Ret(); + + // Handle the slow case by jumping to the JavaScript builtin. + __ bind(&slow); + __ push(r0); + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); + break; + default: + UNREACHABLE(); + } } @@ -3334,17 +4016,15 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // r4: number of arguments including receiver (C callee-saved) // r5: pointer to builtin function (C callee-saved) // r6: pointer to the first argument (C callee-saved) - Isolate* isolate = masm->isolate(); if (do_gc) { // Passing r0. 
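// Hedged sketch of the Smi BIT_NOT fast path above: with a one-bit zero tag,
// ~(2*v) == 2*(~v) + 1, so inverting the tagged word (mvn) and clearing the
// tag bit (bic) yields the correctly tagged ~v without ever untagging.
#include <cstdint>

int32_t SmiBitNot(int32_t tagged) {   // tagged == v << 1
  int32_t inverted = ~tagged;         // == (~v << 1) | 1
  return inverted & ~1;               // clear the tag bit: the tagged value of ~v
}
// e.g. SmiBitNot(5 << 1) == (-6) << 1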
- __ PrepareCallCFunction(1, 0, r1); - __ CallCFunction(ExternalReference::perform_gc_function(isolate), - 1, 0); + __ PrepareCallCFunction(1, r1); + __ CallCFunction(ExternalReference::perform_gc_function(), 1); } ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(isolate); + ExternalReference::heap_always_allocate_scope_depth(); if (always_allocate) { __ mov(r0, Operand(scope_depth)); __ ldr(r1, MemOperand(r0)); @@ -3373,12 +4053,14 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } #endif - __ mov(r2, Operand(ExternalReference::isolate_address())); + // TODO(1242173): To let the GC traverse the return address of the exit + // frames, we need to know where the return address is. Right now, + // we store it on the stack to be able to find it again, but we never + // restore from it in case of changes, which makes it impossible to + // support moving the C entry code stub. This should be fixed, but currently + // this is OK because the CEntryStub gets generated so early in the V8 boot + // sequence that it is not moving ever. - // To let the GC traverse the return address of the exit frames, we need to - // know where the return address is. The CEntryStub is unmovable, so - // we can store the address on the stack to be able to find it again and - // we never have to restore it, because it will not change. // Compute the return address in lr to return to after the jump below. Pc is // already at '+ 8' from the current instruction but return is after three // instructions so add another 4 to pc to get the return address. @@ -3424,16 +4106,15 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); + __ mov(ip, Operand(ExternalReference::the_hole_value_location())); __ ldr(r3, MemOperand(ip)); - __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address, - isolate))); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); __ ldr(r0, MemOperand(ip)); __ str(r3, MemOperand(ip)); // Special handling of termination exceptions which are uncatchable // by javascript code. - __ cmp(r0, Operand(isolate->factory()->termination_exception())); + __ cmp(r0, Operand(Factory::termination_exception())); __ b(eq, throw_termination_exception); // Handle normal exception. @@ -3528,26 +4209,12 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Save callee-saved registers (incl. cp and fp), sp, and lr __ stm(db_w, sp, kCalleeSaved | lr.bit()); - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Save callee-saved vfp registers. - __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - // Set up the reserved register for 0.0. - __ vmov(kDoubleRegZero, 0.0); - } - // Get address of argv, see stm above. // r0: code entry // r1: function // r2: receiver // r3: argc - - // Setup argv in r4. - int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; - if (CpuFeatures::IsSupported(VFP3)) { - offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; - } - __ ldr(r4, MemOperand(sp, offset_to_argv)); + __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv // Push a frame with special values setup to mark it as an entry frame. 
// r0: code entry @@ -3555,13 +4222,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r2: receiver // r3: argc // r4: argv - Isolate* isolate = masm->isolate(); __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; __ mov(r7, Operand(Smi::FromInt(marker))); __ mov(r6, Operand(Smi::FromInt(marker))); - __ mov(r5, - Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate))); + __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); __ ldr(r5, MemOperand(r5)); __ Push(r8, r7, r6, r5); @@ -3570,20 +4235,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { #ifdef ENABLE_LOGGING_AND_PROFILING // If this is the outermost JS call, set js_entry_sp value. - Label non_outermost_js; - ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate); + ExternalReference js_entry_sp(Top::k_js_entry_sp_address); __ mov(r5, Operand(ExternalReference(js_entry_sp))); __ ldr(r6, MemOperand(r5)); - __ cmp(r6, Operand(0)); - __ b(ne, &non_outermost_js); - __ str(fp, MemOperand(r5)); - __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); - Label cont; - __ b(&cont); - __ bind(&non_outermost_js); - __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); - __ bind(&cont); - __ push(ip); + __ cmp(r6, Operand(0, RelocInfo::NONE)); + __ str(fp, MemOperand(r5), eq); #endif // Call a faked try-block that does the invoke. @@ -3593,8 +4249,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // exception field in the JSEnv and return a failure sentinel. // Coming in here the fp will be invalid because the PushTryHandler below // sets it to 0 to signal the existence of the JSEntry frame. - __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address, - isolate))); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); __ str(r0, MemOperand(ip)); __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); __ b(&exit); @@ -3609,10 +4264,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); + __ mov(ip, Operand(ExternalReference::the_hole_value_location())); __ ldr(r5, MemOperand(ip)); - __ mov(ip, Operand(ExternalReference(Isolate::k_pending_exception_address, - isolate))); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); __ str(r5, MemOperand(ip)); // Invoke the function by calling through JS entry trampoline builtin. @@ -3626,11 +4280,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r3: argc // r4: argv if (is_construct) { - ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, - isolate); + ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); __ mov(ip, Operand(construct_entry)); } else { - ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); + ExternalReference entry(Builtins::JSEntryTrampoline); __ mov(ip, Operand(entry)); } __ ldr(ip, MemOperand(ip)); // deref address @@ -3641,26 +4294,30 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ mov(lr, Operand(pc)); masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Unlink this frame from the handler chain. 
- __ PopTryHandler(); + // Unlink this frame from the handler chain. When reading the + // address of the next handler, there is no need to use the address + // displacement since the current stack pointer (sp) points directly + // to the stack handler. + __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); + __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); + __ str(r3, MemOperand(ip)); + // No need to restore registers + __ add(sp, sp, Operand(StackHandlerConstants::kSize)); - __ bind(&exit); // r0 holds result #ifdef ENABLE_LOGGING_AND_PROFILING - // Check if the current stack frame is marked as the outermost JS frame. - Label non_outermost_js_2; - __ pop(r5); - __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); - __ b(ne, &non_outermost_js_2); - __ mov(r6, Operand(0)); + // If current FP value is the same as js_entry_sp value, it means that + // the current function is the outermost. __ mov(r5, Operand(ExternalReference(js_entry_sp))); - __ str(r6, MemOperand(r5)); - __ bind(&non_outermost_js_2); + __ ldr(r6, MemOperand(r5)); + __ cmp(fp, Operand(r6)); + __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ str(r6, MemOperand(r5), eq); #endif + __ bind(&exit); // r0 holds result // Restore the top frame descriptors from the stack. __ pop(r3); - __ mov(ip, - Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate))); + __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); __ str(r3, MemOperand(ip)); // Reset the stack to the callee saved registers. @@ -3672,13 +4329,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ mov(lr, Operand(pc)); } #endif - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Restore callee-saved vfp registers. - __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); - } - __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); } @@ -3824,7 +4474,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ b(ne, &slow); // Null is not instance of anything. - __ cmp(scratch, Operand(masm->isolate()->factory()->null_value())); + __ cmp(scratch, Operand(Factory::null_value())); __ b(ne, &object_not_null); __ mov(r0, Operand(Smi::FromInt(1))); __ Ret(HasArgsInRegisters() ? 0 : 2); @@ -3847,11 +4497,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { if (HasArgsInRegisters()) { __ Push(r0, r1); } - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); } else { __ EnterInternalFrame(); __ Push(r0, r1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS); __ LeaveInternalFrame(); __ cmp(r0, Operand(0)); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); @@ -3918,233 +4568,12 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { } -void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { // sp[0] : number of parameters // sp[4] : receiver displacement // sp[8] : function // Check if the calling frame is an arguments adaptor frame. - Label runtime; - __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); - __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(ne, &runtime); - - // Patch the arguments.length and the parameters pointer in the current frame. 
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ str(r2, MemOperand(sp, 0 * kPointerSize)); - __ add(r3, r3, Operand(r2, LSL, 1)); - __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); - __ str(r3, MemOperand(sp, 1 * kPointerSize)); - - __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); -} - - -void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { - // Stack layout: - // sp[0] : number of parameters (tagged) - // sp[4] : address of receiver argument - // sp[8] : function - // Registers used over whole function: - // r6 : allocated object (tagged) - // r9 : mapped parameter count (tagged) - - __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); - // r1 = parameter count (tagged) - - // Check if the calling frame is an arguments adaptor frame. - Label runtime; - Label adaptor_frame, try_allocate; - __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); - __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(eq, &adaptor_frame); - - // No adaptor, parameter count = argument count. - __ mov(r2, r1); - __ b(&try_allocate); - - // We have an adaptor frame. Patch the parameters pointer. - __ bind(&adaptor_frame); - __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ add(r3, r3, Operand(r2, LSL, 1)); - __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); - __ str(r3, MemOperand(sp, 1 * kPointerSize)); - - // r1 = parameter count (tagged) - // r2 = argument count (tagged) - // Compute the mapped parameter count = min(r1, r2) in r1. - __ cmp(r1, Operand(r2)); - __ mov(r1, Operand(r2), LeaveCC, gt); - - __ bind(&try_allocate); - - // Compute the sizes of backing store, parameter map, and arguments object. - // 1. Parameter map, has 2 extra words containing context and backing store. - const int kParameterMapHeaderSize = - FixedArray::kHeaderSize + 2 * kPointerSize; - // If there are no mapped parameters, we do not need the parameter_map. - __ cmp(r1, Operand(Smi::FromInt(0))); - __ mov(r9, Operand(0), LeaveCC, eq); - __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne); - __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne); - - // 2. Backing store. - __ add(r9, r9, Operand(r2, LSL, 1)); - __ add(r9, r9, Operand(FixedArray::kHeaderSize)); - - // 3. Arguments object. - __ add(r9, r9, Operand(Heap::kArgumentsObjectSize)); - - // Do the allocation of all three objects in one go. - __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT); - - // r0 = address of new object(s) (tagged) - // r2 = argument count (tagged) - // Get the arguments boilerplate from the current (global) context into r4. - const int kNormalOffset = - Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - const int kAliasedOffset = - Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); - - __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); - __ cmp(r1, Operand(0)); - __ ldr(r4, MemOperand(r4, kNormalOffset), eq); - __ ldr(r4, MemOperand(r4, kAliasedOffset), ne); - - // r0 = address of new object (tagged) - // r1 = mapped parameter count (tagged) - // r2 = argument count (tagged) - // r4 = address of boilerplate object (tagged) - // Copy the JS object part. 
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ ldr(r3, FieldMemOperand(r4, i)); - __ str(r3, FieldMemOperand(r0, i)); - } - - // Setup the callee in-object property. - STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); - __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); - const int kCalleeOffset = JSObject::kHeaderSize + - Heap::kArgumentsCalleeIndex * kPointerSize; - __ str(r3, FieldMemOperand(r0, kCalleeOffset)); - - // Use the length (smi tagged) and set that as an in-object property too. - STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); - const int kLengthOffset = JSObject::kHeaderSize + - Heap::kArgumentsLengthIndex * kPointerSize; - __ str(r2, FieldMemOperand(r0, kLengthOffset)); - - // Setup the elements pointer in the allocated arguments object. - // If we allocated a parameter map, r4 will point there, otherwise - // it will point to the backing store. - __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); - __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); - - // r0 = address of new object (tagged) - // r1 = mapped parameter count (tagged) - // r2 = argument count (tagged) - // r4 = address of parameter map or backing store (tagged) - // Initialize parameter map. If there are no mapped arguments, we're done. - Label skip_parameter_map; - __ cmp(r1, Operand(Smi::FromInt(0))); - // Move backing store address to r3, because it is - // expected there when filling in the unmapped arguments. - __ mov(r3, r4, LeaveCC, eq); - __ b(eq, &skip_parameter_map); - - __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex); - __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset)); - __ add(r6, r1, Operand(Smi::FromInt(2))); - __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); - __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize)); - __ add(r6, r4, Operand(r1, LSL, 1)); - __ add(r6, r6, Operand(kParameterMapHeaderSize)); - __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize)); - - // Copy the parameter slots and the holes in the arguments. - // We need to fill in mapped_parameter_count slots. They index the context, - // where parameters are stored in reverse order, at - // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 - // The mapped parameter thus need to get indices - // MIN_CONTEXT_SLOTS+parameter_count-1 .. - // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count - // We loop from right to left. 
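The removed comment block above compresses the parameter-map index arithmetic into prose. As a standalone illustrative sketch (not taken from this patch; the counts and the MIN_CONTEXT_SLOTS stand-in below are hypothetical), the mapped parameters occupy exactly the context-slot range the comment states, filled from right to left:

// sketch.cc -- restates the slot-range formula from the comment above.
#include <cstdio>

int main() {
  const int kMinContextSlots = 5;   // stand-in for Context::MIN_CONTEXT_SLOTS
  const int parameter_count  = 4;   // hypothetical formal parameter count
  const int mapped_count     = 2;   // hypothetical mapped parameter count

  // Range from the comment: MIN_CONTEXT_SLOTS + parameter_count - mapped_count
  // up to MIN_CONTEXT_SLOTS + parameter_count - 1, walked right to left.
  const int first = kMinContextSlots + parameter_count - mapped_count;  // 7
  const int last  = kMinContextSlots + parameter_count - 1;             // 8
  printf("mapped parameters use context slots %d..%d\n", first, last);
  return 0;
}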
-  Label parameters_loop, parameters_test;
-  __ mov(r6, r1);
-  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
-  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ sub(r9, r9, Operand(r1));
-  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
-  __ add(r3, r4, Operand(r6, LSL, 1));
-  __ add(r3, r3, Operand(kParameterMapHeaderSize));
-
-  // r6 = loop variable (tagged)
-  // r1 = mapping index (tagged)
-  // r3 = address of backing store (tagged)
-  // r4 = address of parameter map (tagged)
-  // r5 = temporary scratch (a.o., for address calculation)
-  // r7 = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ sub(r6, r6, Operand(Smi::FromInt(1)));
-  __ mov(r5, Operand(r6, LSL, 1));
-  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ str(r9, MemOperand(r4, r5));
-  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ str(r7, MemOperand(r3, r5));
-  __ add(r9, r9, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ cmp(r6, Operand(Smi::FromInt(0)));
-  __ b(ne, &parameters_loop);
-
-  __ bind(&skip_parameter_map);
-  // r2 = argument count (tagged)
-  // r3 = address of backing store (tagged)
-  // r5 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
-  __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
-  __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ mov(r9, r1);
-  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
-  __ sub(r4, r4, Operand(r9, LSL, 1));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ sub(r4, r4, Operand(kPointerSize));
-  __ ldr(r6, MemOperand(r4, 0));
-  __ add(r5, r3, Operand(r9, LSL, 1));
-  __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
-  __ add(r9, r9, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ cmp(r9, Operand(r2));
-  __ b(lt, &arguments_loop);
-
-  // Return and remove the on-stack parameters.
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // r2 = argument count (taggged)
-  __ bind(&runtime);
-  __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
-  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // sp[0] : number of parameters
-  // sp[4] : receiver displacement
-  // sp[8] : function
-  // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
@@ -4172,31 +4601,35 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   __ mov(r1, Operand(r1, LSR, kSmiTagSize));
   __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
   __ bind(&add_arguments_object);
-  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+  __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
 
   // Do the allocation of both objects in one go.
-  __ AllocateInNewSpace(r1,
-                        r0,
-                        r2,
-                        r3,
-                        &runtime,
-                        static_cast<AllocationFlags>(TAG_OBJECT |
-                                                     SIZE_IN_WORDS));
+  __ AllocateInNewSpace(
+      r1,
+      r0,
+      r2,
+      r3,
+      &runtime,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
 
   // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); - __ ldr(r4, MemOperand(r4, Context::SlotOffset( - Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); + __ ldr(r4, MemOperand(r4, offset)); // Copy the JS object part. __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); + // Setup the callee in-object property. + STATIC_ASSERT(Heap::arguments_callee_index == 0); + __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); + __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); + // Get the length (smi tagged) and set that as an in-object property too. - STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); + STATIC_ASSERT(Heap::arguments_length_index == 1); __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); - __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + - Heap::kArgumentsLengthIndex * kPointerSize)); + __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); // If there are no actual arguments, we're done. Label done; @@ -4208,13 +4641,12 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Setup the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. - __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); + __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // Untag the length for the loop. - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); + __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop. // Copy the fixed array slots. Label loop; @@ -4237,7 +4669,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Do the runtime call to allocate the arguments object. __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); } @@ -4276,11 +4708,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Register last_match_info_elements = r6; // Ensure that a RegExp stack is allocated. - Isolate* isolate = masm->isolate(); ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(isolate); + ExternalReference::address_of_regexp_stack_memory_address(); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(isolate); + ExternalReference::address_of_regexp_stack_memory_size(); __ mov(r0, Operand(address_of_regexp_stack_memory_size)); __ ldr(r0, MemOperand(r0, 0)); __ tst(r0, Operand(r0)); @@ -4289,7 +4720,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the first argument is a JSRegExp object. __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(r0, &runtime); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); __ b(ne, &runtime); @@ -4325,7 +4757,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // regexp_data: RegExp data (FixedArray) // Check that the second argument is a string. 
__ ldr(subject, MemOperand(sp, kSubjectOffset)); - __ JumpIfSmi(subject, &runtime); + __ tst(subject, Operand(kSmiTagMask)); + __ b(eq, &runtime); Condition is_string = masm->IsObjectStringType(subject, r0); __ b(NegateCondition(is_string), &runtime); // Get the length of the string to r3. @@ -4338,7 +4771,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the third argument is a positive smi less than the subject // string length. A negative value will be greater (unsigned comparison). __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); - __ JumpIfNotSmi(r0, &runtime); + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &runtime); __ cmp(r3, Operand(r0)); __ b(ls, &runtime); @@ -4347,7 +4781,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // regexp_data: RegExp data (FixedArray) // Check that the fourth object is a JSArray object. __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); - __ JumpIfSmi(r0, &runtime); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); __ b(ne, &runtime); // Check that the JSArray is in fast case. @@ -4417,7 +4852,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ CompareObjectType(r7, r0, r0, CODE_TYPE); __ b(ne, &runtime); - // r3: encoding of subject string (1 if ASCII, 0 if two_byte); + // r3: encoding of subject string (1 if ascii, 0 if two_byte); // r7: code // subject: Subject string // regexp_data: RegExp data (FixedArray) @@ -4427,25 +4862,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // r1: previous index - // r3: encoding of subject string (1 if ASCII, 0 if two_byte); + // r3: encoding of subject string (1 if ascii, 0 if two_byte); // r7: code // subject: Subject string // regexp_data: RegExp data (FixedArray) // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); + __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); - // Isolates: note we add an additional parameter here (isolate pointer). - static const int kRegExpExecuteArguments = 8; + static const int kRegExpExecuteArguments = 7; static const int kParameterRegisters = 4; __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); // Stack pointer now points to cell where return address is to be written. // Arguments are before that on the stack or in registers. - // Argument 8 (sp[16]): Pass current isolate address. - __ mov(r0, Operand(ExternalReference::isolate_address())); - __ str(r0, MemOperand(sp, 4 * kPointerSize)); - // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); __ str(r0, MemOperand(sp, 3 * kPointerSize)); @@ -4459,8 +4889,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ str(r0, MemOperand(sp, 2 * kPointerSize)); // Argument 5 (sp[4]): static offsets vector buffer. - __ mov(r0, - Operand(ExternalReference::address_of_static_offsets_vector(isolate))); + __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); __ str(r0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data and @@ -4508,10 +4937,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. 
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate))); + __ mov(r1, Operand(ExternalReference::the_hole_value_location())); __ ldr(r1, MemOperand(r1, 0)); - __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address, - isolate))); + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); __ ldr(r0, MemOperand(r2, 0)); __ cmp(r0, r1); __ b(eq, &runtime); @@ -4531,7 +4959,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&failure); // For failure and exception return null. - __ mov(r0, Operand(masm->isolate()->factory()->null_value())); + __ mov(r0, Operand(Factory::null_value())); __ add(sp, sp, Operand(4 * kPointerSize)); __ Ret(); @@ -4564,7 +4992,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(isolate); + ExternalReference::address_of_static_offsets_vector(); __ mov(r2, Operand(address_of_static_offsets_vector)); // r1: number of capture registers @@ -4602,12 +5030,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { const int kMaxInlineLength = 100; Label slowcase; Label done; - Factory* factory = masm->isolate()->factory(); - __ ldr(r1, MemOperand(sp, kPointerSize * 2)); STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize == 1); - __ JumpIfNotSmi(r1, &slowcase); + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &slowcase); __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); __ b(hi, &slowcase); // Smi-tagging is equivalent to multiplying by 2. @@ -4637,7 +5064,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // Interleave operations for better latency. __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); __ add(r3, r0, Operand(JSRegExpResult::kSize)); - __ mov(r4, Operand(factory->empty_fixed_array())); + __ mov(r4, Operand(Factory::empty_fixed_array())); __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); @@ -4658,13 +5085,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // r5: Number of elements in array, untagged. // Set map. - __ mov(r2, Operand(factory->fixed_array_map())); + __ mov(r2, Operand(Factory::fixed_array_map())); __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); // Set FixedArray length. __ mov(r6, Operand(r5, LSL, kSmiTagSize)); __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); // Fill contents of fixed-array with the-hole. - __ mov(r2, Operand(factory->the_hole_value())); + __ mov(r2, Operand(Factory::the_hole_value())); __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); // Fill fixed array elements with hole. // r0: JSArray, tagged. @@ -4691,22 +5118,30 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow; - // The receiver might implicitly be the global object. This is - // indicated by passing the hole as the receiver to the call - // function stub. - if (ReceiverMightBeImplicit()) { - Label call; + // If the receiver might be a value (string, number or boolean) check for this + // and box it if it is. + if (ReceiverMightBeValue()) { // Get the receiver from the stack. 
// function, receiver [, arguments] - __ ldr(r4, MemOperand(sp, argc_ * kPointerSize)); - // Call as function is indicated with the hole. - __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); - __ b(ne, &call); - // Patch the receiver on the stack with the global receiver object. - __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); - __ str(r1, MemOperand(sp, argc_ * kPointerSize)); - __ bind(&call); + Label receiver_is_value, receiver_is_js_object; + __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); + + // Check if receiver is a smi (which is a number value). + __ JumpIfSmi(r1, &receiver_is_value); + + // Check if the receiver is a valid JS object. + __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); + __ b(ge, &receiver_is_js_object); + + // Call the runtime to box the value. + __ bind(&receiver_is_value); + __ EnterInternalFrame(); + __ push(r1); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); + __ LeaveInternalFrame(); + __ str(r0, MemOperand(sp, argc_ * kPointerSize)); + + __ bind(&receiver_is_js_object); } // Get the function to call from the stack. @@ -4723,23 +5158,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Fast-case: Invoke the function now. // r1: pushed function ParameterCount actual(argc_); - - if (ReceiverMightBeImplicit()) { - Label call_as_function; - __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); - __ b(eq, &call_as_function); - __ InvokeFunction(r1, - actual, - JUMP_FUNCTION, - NullCallWrapper(), - CALL_AS_METHOD); - __ bind(&call_as_function); - } - __ InvokeFunction(r1, - actual, - JUMP_FUNCTION, - NullCallWrapper(), - CALL_AS_FUNCTION); + __ InvokeFunction(r1, actual, JUMP_FUNCTION); // Slow-case: Non-function called. __ bind(&slow); @@ -4749,7 +5168,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ mov(r0, Operand(argc_)); // Setup the number of arguments. __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); - __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET); } @@ -4762,8 +5181,7 @@ const char* CompareStub::GetName() { if (name_ != NULL) return name_; const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); if (name_ == NULL) return "OOM"; const char* cc_name; @@ -4922,7 +5340,7 @@ void StringCharCodeAtGenerator::GenerateSlow( scratch_, Heap::kHeapNumberMapRootIndex, index_not_number_, - DONT_DO_SMI_CHECK); + true); call_helper.BeforeCall(masm); __ Push(object_, index_); __ push(index_); // Consumed by runtime conversion function. @@ -4976,7 +5394,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { __ b(ne, &slow_case_); __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); - // At this point code register contains smi tagged ASCII char code. + // At this point code register contains smi tagged ascii char code. 
STATIC_ASSERT(kSmiTag == 0); __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); @@ -5308,6 +5726,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register symbol_table = c2; __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); + // Load undefined value Register undefined = scratch4; __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); @@ -5328,7 +5747,6 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // mask: capacity mask // first_symbol_table_element: address of the first element of // the symbol table - // undefined: the undefined object // scratch: - // Perform a number of probes in the symbol table. @@ -5356,32 +5774,20 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, kPointerSizeLog2)); // If entry is undefined no string with this hash can be found. - Label is_string; - __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE); - __ b(ne, &is_string); - - __ cmp(undefined, candidate); + __ cmp(candidate, undefined); __ b(eq, not_found); - // Must be null (deleted entry). - if (FLAG_debug_code) { - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(ip, candidate); - __ Assert(eq, "oddball in symbol table is not undefined or null"); - } - __ jmp(&next_probe[i]); - - __ bind(&is_string); - - // Check that the candidate is a non-external ASCII string. The instance - // type is still in the scratch register from the CompareObjectType - // operation. - __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]); // If length is not 2 the string is not a candidate. __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); __ cmp(scratch, Operand(Smi::FromInt(2))); __ b(ne, &next_probe[i]); + // Check that the candidate is a non-external ascii string. + __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, + &next_probe[i]); + // Check if the two characters match. // Assumes that word load is little endian. __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); @@ -5456,6 +5862,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { static const int kFromOffset = 1 * kPointerSize; static const int kStringOffset = 2 * kPointerSize; + // Check bounds and smi-ness. Register to = r6; Register from = r7; @@ -5488,7 +5895,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Make sure first argument is a sequential (or flat) string. __ ldr(r5, MemOperand(sp, kStringOffset)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(r5, &runtime); + __ tst(r5, Operand(kSmiTagMask)); + __ b(eq, &runtime); Condition is_string = masm->IsObjectStringType(r5, r1); __ b(NegateCondition(is_string), &runtime); @@ -5535,7 +5943,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r3: from index (untaged smi) // r5: string. // r7 (a.k.a. from): from offset (smi) - // Check for flat ASCII string. + // Check for flat ascii string. 
Label non_ascii_flat; __ tst(r1, Operand(kStringEncodingMask)); STATIC_ASSERT(kTwoByteStringTag == 0); @@ -5555,8 +5963,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { Label make_two_character_string; StringHelper::GenerateTwoCharacterSymbolTableProbe( masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -5565,7 +5972,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&make_two_character_string); __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -5591,7 +5998,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, COPY_ASCII | DEST_ALWAYS_ALIGNED); - __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -5623,7 +6030,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong( masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); - __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -5633,45 +6040,6 @@ void SubStringStub::Generate(MacroAssembler* masm) { } -void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3) { - Register length = scratch1; - - // Compare lengths. - Label strings_not_equal, check_zero_length; - __ ldr(length, FieldMemOperand(left, String::kLengthOffset)); - __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); - __ cmp(length, scratch2); - __ b(eq, &check_zero_length); - __ bind(&strings_not_equal); - __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); - __ Ret(); - - // Check if the length is zero. - Label compare_chars; - __ bind(&check_zero_length); - STATIC_ASSERT(kSmiTag == 0); - __ tst(length, Operand(length)); - __ b(ne, &compare_chars); - __ mov(r0, Operand(Smi::FromInt(EQUAL))); - __ Ret(); - - // Compare characters. - __ bind(&compare_chars); - GenerateAsciiCharsCompareLoop(masm, - left, right, length, scratch2, scratch3, - &strings_not_equal); - - // Characters are equal. - __ mov(r0, Operand(Smi::FromInt(EQUAL))); - __ Ret(); -} - - void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, Register left, Register right, @@ -5679,7 +6047,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, Register scratch2, Register scratch3, Register scratch4) { - Label result_not_equal, compare_lengths; + Label compare_lengths; // Find minimum length and length difference. 
__ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); @@ -5691,61 +6059,49 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, __ tst(min_length, Operand(min_length)); __ b(eq, &compare_lengths); - // Compare loop. - GenerateAsciiCharsCompareLoop(masm, - left, right, min_length, scratch2, scratch4, - &result_not_equal); + // Untag smi. + __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); + + // Setup registers so that we only need to increment one register + // in the loop. + __ add(scratch2, min_length, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(left, left, Operand(scratch2)); + __ add(right, right, Operand(scratch2)); + // Registers left and right points to the min_length character of strings. + __ rsb(min_length, min_length, Operand(-1)); + Register index = min_length; + // Index starts at -min_length. - // Compare lengths - strings up to min-length are equal. + { + // Compare loop. + Label loop; + __ bind(&loop); + // Compare characters. + __ add(index, index, Operand(1), SetCC); + __ ldrb(scratch2, MemOperand(left, index), ne); + __ ldrb(scratch4, MemOperand(right, index), ne); + // Skip to compare lengths with eq condition true. + __ b(eq, &compare_lengths); + __ cmp(scratch2, scratch4); + __ b(eq, &loop); + // Fallthrough with eq condition false. + } + // Compare lengths - strings up to min-length are equal. __ bind(&compare_lengths); ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); - // Use length_delta as result if it's zero. - __ mov(r0, Operand(length_delta), SetCC); - __ bind(&result_not_equal); - // Conditionally update the result based either on length_delta or - // the last comparion performed in the loop above. + // Use zero length_delta as result. + __ mov(r0, Operand(length_delta), SetCC, eq); + // Fall through to here if characters compare not-equal. __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); __ Ret(); } -void StringCompareStub::GenerateAsciiCharsCompareLoop( - MacroAssembler* masm, - Register left, - Register right, - Register length, - Register scratch1, - Register scratch2, - Label* chars_not_equal) { - // Change index to run from -length to -1 by adding length to string - // start. This means that loop ends when index reaches zero, which - // doesn't need an additional compare. - __ SmiUntag(length); - __ add(scratch1, length, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(left, left, Operand(scratch1)); - __ add(right, right, Operand(scratch1)); - __ rsb(length, length, Operand(0)); - Register index = length; // index = -length; - - // Compare loop. - Label loop; - __ bind(&loop); - __ ldrb(scratch1, MemOperand(left, index)); - __ ldrb(scratch2, MemOperand(right, index)); - __ cmp(scratch1, scratch2); - __ b(ne, chars_not_equal); - __ add(index, index, Operand(1), SetCC); - __ b(ne, &loop); -} - - void StringCompareStub::Generate(MacroAssembler* masm) { Label runtime; - Counters* counters = masm->isolate()->counters(); - // Stack frame on entry. 
   // sp[0]: right string
   // sp[4]: left string
@@ -5757,17 +6113,17 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
-  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
+  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
+  // Check that both objects are sequential ascii strings.
   __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
 
-  // Compare flat ASCII strings natively. Remove arguments from stack first.
-  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
+  // Compare flat ascii strings natively. Remove arguments from stack first.
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
 
@@ -5782,8 +6138,6 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   Label string_add_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
-  Counters* counters = masm->isolate()->counters();
-
   // Stack on entry:
   // sp[0]: second argument (right).
   // sp[4]: first argument (left).
@@ -5839,7 +6193,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
     __ cmp(r3, Operand(Smi::FromInt(0)), ne);
     __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
-    __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
+    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
     __ add(sp, sp, Operand(2 * kPointerSize));
     __ Ret();
 
@@ -5860,12 +6214,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // Adding two lengths can't overflow.
   STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
   __ add(r6, r2, Operand(r3));
-  // Use the symbol table when adding two one character strings, as it
-  // helps later optimizations to return a symbol here.
+  // Use the runtime system when adding two one character strings, as it
+  // contains optimizations for this specific case using the symbol table.
   __ cmp(r6, Operand(2));
   __ b(ne, &longer_than_two);
 
-  // Check that both strings are non-external ASCII strings.
+  // Check that both strings are non-external ascii strings.
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -5884,7 +6238,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
-  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
 
@@ -5897,7 +6251,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ mov(r6, Operand(2));
   __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
   __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
 
@@ -5913,7 +6267,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ b(hs, &string_add_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object.
-  // If both strings are ASCII the result is an ASCII cons string.
+ // If both strings are ascii the result is an ascii cons string. if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -5934,13 +6288,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); __ mov(r0, Operand(r7)); - __ IncrementCounter(counters->string_add_native(), 1, r2, r3); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); __ bind(&non_ascii); // At least one of the strings is two-byte. Check whether it happens - // to contain only ASCII characters. + // to contain only ascii characters. // r4: first instance type. // r5: second instance type. __ tst(r4, Operand(kAsciiDataHintMask)); @@ -6016,7 +6370,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r7: result string. StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); __ mov(r0, Operand(r7)); - __ IncrementCounter(counters->string_add_native(), 1, r2, r3); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -6057,7 +6411,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); __ mov(r0, Operand(r7)); - __ IncrementCounter(counters->string_add_native(), 1, r2, r3); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -6067,7 +6421,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { if (call_builtin.is_linked()) { __ bind(&call_builtin); - __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); + __ InvokeBuiltin(builtin_id, JUMP_JS); } } @@ -6121,11 +6475,62 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, } +void StringCharAtStub::Generate(MacroAssembler* masm) { + // Expects two arguments (object, index) on the stack: + // lr: return address + // sp[0]: index + // sp[4]: object + Register object = r1; + Register index = r0; + Register scratch1 = r2; + Register scratch2 = r3; + Register result = r0; + + // Get object and index from the stack. + __ pop(index); + __ pop(object); + + Label need_conversion; + Label index_out_of_range; + Label done; + StringCharAtGenerator generator(object, + index, + scratch1, + scratch2, + result, + &need_conversion, + &need_conversion, + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ b(&done); + + __ bind(&index_out_of_range); + // When the index is out of range, the spec requires us to return + // the empty string. + __ LoadRoot(result, Heap::kEmptyStringRootIndex); + __ jmp(&done); + + __ bind(&need_conversion); + // Move smi zero into the result register, which will trigger + // conversion. + __ mov(result, Operand(Smi::FromInt(0))); + __ b(&done); + + StubRuntimeCallHelper call_helper; + generator.GenerateSlow(masm, call_helper); + + __ bind(&done); + __ Ret(); +} + + void ICCompareStub::GenerateSmis(MacroAssembler* masm) { ASSERT(state_ == CompareIC::SMIS); Label miss; __ orr(r2, r1, r0); - __ JumpIfNotSmi(r2, &miss); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, &miss); if (GetCondition() == eq) { // For equality we do not care about the sign of the result. 
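The smi compare stub at the end of the hunk above opens by OR-ing the two tagged operands and testing the smi tag bit once; the heap-number and object variants in the hunks that follow use AND to ask the complementary question (whether both operands are heap objects). A standalone C++ sketch of the OR check, not taken from this patch; the constants mirror V8's smi tagging scheme, where the low bit is 0 for smis and 1 for heap objects:

// smi_check.cc -- why "orr r2, r1, r0; tst r2, #kSmiTagMask" tests both operands.
#include <cstdint>
#include <cstdio>

const intptr_t kSmiTagMask = 1;  // low tag bit; clear for smis, set for heap objects

bool BothSmi(intptr_t a, intptr_t b) {
  // If either value carries the heap-object tag bit, the OR carries it too.
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  const intptr_t smi_three = 3 << 1;   // smi: payload shifted past the tag bit
  const intptr_t heap_ptr  = 0x1001;   // hypothetical tagged heap pointer
  printf("%d %d\n", BothSmi(smi_three, smi_three), BothSmi(smi_three, heap_ptr));  // 1 0
  return 0;
}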
@@ -6149,7 +6554,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { Label unordered; Label miss; __ and_(r2, r1, Operand(r0)); - __ JumpIfSmi(r2, &generic_stub); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &generic_stub); __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); __ b(ne, &miss); @@ -6191,114 +6597,12 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } -void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOLS); - Label miss; - - // Registers containing left and right operands respectively. - Register left = r1; - Register right = r0; - Register tmp1 = r2; - Register tmp2 = r3; - - // Check that both operands are heap objects. - __ JumpIfEitherSmi(left, right, &miss); - - // Check that both operands are symbols. - __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); - __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); - __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); - __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp1, tmp1, Operand(tmp2)); - __ tst(tmp1, Operand(kIsSymbolMask)); - __ b(eq, &miss); - - // Symbols are compared by identity. - __ cmp(left, right); - // Make sure r0 is non-zero. At this point input operands are - // guaranteed to be non-zero. - ASSERT(right.is(r0)); - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); - __ Ret(); - - __ bind(&miss); - GenerateMiss(masm); -} - - -void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRINGS); - Label miss; - - // Registers containing left and right operands respectively. - Register left = r1; - Register right = r0; - Register tmp1 = r2; - Register tmp2 = r3; - Register tmp3 = r4; - Register tmp4 = r5; - - // Check that both operands are heap objects. - __ JumpIfEitherSmi(left, right, &miss); - - // Check that both operands are strings. This leaves the instance - // types loaded in tmp1 and tmp2. - __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); - __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); - __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); - __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kNotStringTag != 0); - __ orr(tmp3, tmp1, tmp2); - __ tst(tmp3, Operand(kIsNotStringMask)); - __ b(ne, &miss); - - // Fast check for identical strings. - __ cmp(left, right); - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); - __ Ret(eq); - - // Handle not identical strings. - - // Check that both strings are symbols. If they are, we're done - // because we already know they are not identical. - ASSERT(GetCondition() == eq); - STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp3, tmp1, Operand(tmp2)); - __ tst(tmp3, Operand(kIsSymbolMask)); - // Make sure r0 is non-zero. At this point input operands are - // guaranteed to be non-zero. - ASSERT(right.is(r0)); - __ Ret(ne); - - // Check that both strings are sequential ASCII. - Label runtime; - __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4, - &runtime); - - // Compare flat ASCII strings. Returns when done. - StringCompareStub::GenerateFlatAsciiStringEquals( - masm, left, right, tmp1, tmp2, tmp3); - - // Handle more complex cases in runtime. 
- __ bind(&runtime); - __ Push(left, right); - __ TailCallRuntime(Runtime::kStringEquals, 2, 1); - - __ bind(&miss); - GenerateMiss(masm); -} - - void ICCompareStub::GenerateObjects(MacroAssembler* masm) { ASSERT(state_ == CompareIC::OBJECTS); Label miss; __ and_(r2, r1, Operand(r0)); - __ JumpIfSmi(r2, &miss); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &miss); __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); __ b(ne, &miss); @@ -6319,8 +6623,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { __ push(lr); // Call the runtime system in a fresh internal frame. - ExternalReference miss = - ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); + ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss)); __ EnterInternalFrame(); __ Push(r1, r0); __ mov(ip, Operand(Smi::FromInt(op_))); @@ -6363,235 +6666,154 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, } -MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( - MacroAssembler* masm, - Label* miss, - Label* done, - Register receiver, - Register properties, - String* name, - Register scratch0) { - // If names of slots in range from 1 to kProbes - 1 for the hash value are - // not equal to the name and kProbes-th slot is not used (its name is the - // undefined value), it guarantees the hash table doesn't contain the - // property. It's true even if some slots represent deleted properties - // (their names are the null value). - for (int i = 0; i < kInlinedProbes; i++) { - // scratch0 points to properties hash. - // Compute the masked index: (hash + i + i * i) & mask. - Register index = scratch0; - // Capacity is smi 2^n. - __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); - __ sub(index, index, Operand(1)); - __ and_(index, index, Operand( - Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); - - // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); - __ add(index, index, Operand(index, LSL, 1)); // index *= 3. - - Register entity_name = scratch0; - // Having undefined at this place means the name is not contained. - ASSERT_EQ(kSmiTagSize, 1); - Register tmp = properties; - __ add(tmp, properties, Operand(index, LSL, 1)); - __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); - - ASSERT(!tmp.is(entity_name)); - __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); - __ cmp(entity_name, tmp); - __ b(eq, done); - - if (i != kInlinedProbes - 1) { - // Stop if found the property. - __ cmp(entity_name, Operand(Handle<String>(name))); - __ b(eq, miss); - - // Check if the entry name is not a symbol. - __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); - __ ldrb(entity_name, - FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); - __ tst(entity_name, Operand(kIsSymbolMask)); - __ b(eq, miss); - - // Restore the properties. 
- __ ldr(properties, - FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - } - } - - const int spill_mask = - (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | - r2.bit() | r1.bit() | r0.bit()); - - __ stm(db_w, sp, spill_mask); - __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - __ mov(r1, Operand(Handle<String>(name))); - StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); - MaybeObject* result = masm->TryCallStub(&stub); - if (result->IsFailure()) return result; - __ tst(r0, Operand(r0)); - __ ldm(ia_w, sp, spill_mask); - - __ b(eq, done); - __ b(ne, miss); - return result; -} - +void GenerateFastPixelArrayLoad(MacroAssembler* masm, + Register receiver, + Register key, + Register elements_map, + Register elements, + Register scratch1, + Register scratch2, + Register result, + Label* not_pixel_array, + Label* key_not_smi, + Label* out_of_range) { + // Register use: + // + // receiver - holds the receiver on entry. + // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // elements - set to be the receiver's elements on exit. + // + // elements_map - set to be the map of the receiver's elements + // on exit. + // + // result - holds the result of the pixel array load on exit, + // tagged as a smi if successful. + // + // Scratch registers: + // + // scratch1 - used a scratch register in map check, if map + // check is successful, contains the length of the + // pixel array, the pointer to external elements and + // the untagged result. + // + // scratch2 - holds the untaged key. -// Probe the string dictionary in the |elements| register. Jump to the -// |done| label if a property with the given name is found. Jump to -// the |miss| label otherwise. -// If lookup was successful |scratch2| will be equal to elements + 4 * index. -void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register scratch1, - Register scratch2) { - // Assert that name contains a string. - if (FLAG_debug_code) __ AbortIfNotString(name); - - // Compute the capacity mask. - __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); - __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int - __ sub(scratch1, scratch1, Operand(1)); - - // Generate an unrolled loop that performs a few probes before - // giving up. Measurements done on Gmail indicate that 2 probes - // cover ~93% of loads from dictionaries. - for (int i = 0; i < kInlinedProbes; i++) { - // Compute the masked index: (hash + i + i * i) & mask. - __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); - if (i > 0) { - // Add the probe offset (i + i * i) left shifted to avoid right shifting - // the hash in a separate instruction. The value hash + i + i * i is right - // shifted in the following and instruction. - ASSERT(StringDictionary::GetProbeOffset(i) < - 1 << (32 - String::kHashFieldOffset)); - __ add(scratch2, scratch2, Operand( - StringDictionary::GetProbeOffset(i) << String::kHashShift)); + // Some callers already have verified that the key is a smi. key_not_smi is + // set to NULL as a sentinel for that case. Otherwise, add an explicit check + // to ensure the key is a smi must be added. 
+ if (key_not_smi != NULL) { + __ JumpIfNotSmi(key, key_not_smi); + } else { + if (FLAG_debug_code) { + __ AbortIfNotSmi(key); } - __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift)); - - // Scale the index by multiplying by the element size. - ASSERT(StringDictionary::kEntrySize == 3); - // scratch2 = scratch2 * 3. - __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); - - // Check if the key is identical to the name. - __ add(scratch2, elements, Operand(scratch2, LSL, 2)); - __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); - __ cmp(name, Operand(ip)); - __ b(eq, done); } - - const int spill_mask = - (lr.bit() | r6.bit() | r5.bit() | r4.bit() | - r3.bit() | r2.bit() | r1.bit() | r0.bit()) & - ~(scratch1.bit() | scratch2.bit()); - - __ stm(db_w, sp, spill_mask); - __ Move(r0, elements); - __ Move(r1, name); - StringDictionaryLookupStub stub(POSITIVE_LOOKUP); - __ CallStub(&stub); - __ tst(r0, Operand(r0)); - __ mov(scratch2, Operand(r2)); - __ ldm(ia_w, sp, spill_mask); - - __ b(ne, done); - __ b(eq, miss); + __ SmiUntag(scratch2, key); + + // Verify that the receiver has pixel array elements. + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex, + not_pixel_array, true); + + // Key must be in range of the pixel array. + __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset)); + __ cmp(scratch2, scratch1); + __ b(hs, out_of_range); // unsigned check handles negative keys. + + // Perform the indexed load and tag the result as a smi. + __ ldr(scratch1, + FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); + __ ldrb(scratch1, MemOperand(scratch1, scratch2)); + __ SmiTag(r0, scratch1); + __ Ret(); } -void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { - // Registers: - // result: StringDictionary to probe - // r1: key - // : StringDictionary to probe. - // index_: will hold an index of entry if lookup is successful. - // might alias with result_. - // Returns: - // result_ is zero if lookup failed, non zero otherwise. - - Register result = r0; - Register dictionary = r0; - Register key = r1; - Register index = r2; - Register mask = r3; - Register hash = r4; - Register undefined = r5; - Register entry_key = r6; - - Label in_dictionary, maybe_in_dictionary, not_in_dictionary; - - __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); - __ mov(mask, Operand(mask, ASR, kSmiTagSize)); - __ sub(mask, mask, Operand(1)); - - __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset)); +void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range) { + // Register use: + // receiver - holds the receiver and is unchanged unless the + // store succeeds. + // key - holds the key (must be a smi) and is unchanged. + // value - holds the value (must be a smi) and is unchanged. + // elements - holds the element object of the receiver on entry if + // load_elements_from_receiver is false, otherwise used + // internally to store the pixel arrays elements and + // external array pointer. + // elements_map - holds the map of the element object if + // load_elements_map_from_elements is false, otherwise + // loaded with the element map. 
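// Aside: a minimal C++ sketch of what the GenerateFastPixelArrayLoad fast path
// above computes once the key is known to be a smi: untag the key, do an
// unsigned bounds check against the pixel array length (which also rejects
// negative keys), load one byte from the external buffer and re-tag it as a
// smi.  The struct and helpers below are hypothetical stand-ins, not V8 types.
#include <stdint.h>

struct PixelArrayView {
  int32_t length;           // untagged element count
  const uint8_t* external;  // external byte buffer the pixels live in
};

static inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }   // kSmiTagSize == 1
static inline int32_t SmiTag(int32_t value) { return value << 1; } // kSmiTag == 0

// Returns true and stores the smi-tagged pixel in *result on the fast path,
// false where the generated code would branch to out_of_range.
static bool FastPixelArrayLoad(const PixelArrayView& elements,
                               int32_t smi_key,
                               int32_t* result) {
  uint32_t index = static_cast<uint32_t>(SmiUntag(smi_key));
  if (index >= static_cast<uint32_t>(elements.length)) return false;
  *result = SmiTag(elements.external[index]);
  return true;
}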
+ // + Register external_pointer = elements; + Register untagged_key = scratch1; + Register untagged_value = scratch2; - __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); + if (load_elements_from_receiver) { + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + } - for (int i = kInlinedProbes; i < kTotalProbes; i++) { - // Compute the masked index: (hash + i + i * i) & mask. - // Capacity is smi 2^n. - if (i > 0) { - // Add the probe offset (i + i * i) left shifted to avoid right shifting - // the hash in a separate instruction. The value hash + i + i * i is right - // shifted in the following and instruction. - ASSERT(StringDictionary::GetProbeOffset(i) < - 1 << (32 - String::kHashFieldOffset)); - __ add(index, hash, Operand( - StringDictionary::GetProbeOffset(i) << String::kHashShift)); - } else { - __ mov(index, Operand(hash)); + // By passing NULL as not_pixel_array, callers signal that they have already + // verified that the receiver has pixel array elements. + if (not_pixel_array != NULL) { + if (load_elements_map_from_elements) { + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); } - __ and_(index, mask, Operand(index, LSR, String::kHashShift)); - - // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); - __ add(index, index, Operand(index, LSL, 1)); // index *= 3. - - ASSERT_EQ(kSmiTagSize, 1); - __ add(index, dictionary, Operand(index, LSL, 2)); - __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); - - // Having undefined at this place means the name is not contained. - __ cmp(entry_key, Operand(undefined)); - __ b(eq, ¬_in_dictionary); - - // Stop if found the property. - __ cmp(entry_key, Operand(key)); - __ b(eq, &in_dictionary); - - if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { - // Check if the entry name is not a symbol. - __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); - __ ldrb(entry_key, - FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); - __ tst(entry_key, Operand(kIsSymbolMask)); - __ b(eq, &maybe_in_dictionary); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ b(ne, not_pixel_array); + } else { + if (FLAG_debug_code) { + // Map check should have already made sure that elements is a pixel array. + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(elements_map, ip); + __ Assert(eq, "Elements isn't a pixel array"); } } - __ bind(&maybe_in_dictionary); - // If we are doing negative lookup then probing failure should be - // treated as a lookup success. For positive lookup probing failure - // should be treated as lookup failure. - if (mode_ == POSITIVE_LOOKUP) { - __ mov(result, Operand(0)); - __ Ret(); + // Some callers already have verified that the key is a smi. key_not_smi is + // set to NULL as a sentinel for that case. Otherwise, add an explicit check + // to ensure the key is a smi must be added. + if (key_not_smi != NULL) { + __ JumpIfNotSmi(key, key_not_smi); + } else { + if (FLAG_debug_code) { + __ AbortIfNotSmi(key); + } } - __ bind(&in_dictionary); - __ mov(result, Operand(1)); - __ Ret(); + __ SmiUntag(untagged_key, key); + + // Perform bounds check. + __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset)); + __ cmp(untagged_key, scratch2); + __ b(hs, out_of_range); // unsigned check handles negative keys. 
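// Aside: a minimal C++ sketch of the remainder of the store fast path shown
// next: the smi value is untagged, clamped to the pixel range [0..255] (the
// effect of the ARM usat instruction with an 8-bit saturation width) and
// written as a single byte into the external pixel buffer.  Helper names are
// hypothetical; only the clamping and byte store mirror the generated code.
#include <stdint.h>

static inline uint8_t ClampToByte(int32_t v) {
  if (v < 0) return 0;      // usat saturates negative values to 0
  if (v > 255) return 255;  // and anything above 2^8 - 1 to 255
  return static_cast<uint8_t>(v);
}

static void FastPixelArrayStoreByte(uint8_t* external_pointer,
                                    uint32_t untagged_key,
                                    int32_t untagged_value) {
  external_pointer[untagged_key] = ClampToByte(untagged_value);  // strb
}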
+ + __ JumpIfNotSmi(value, value_not_smi); + __ SmiUntag(untagged_value, value); - __ bind(¬_in_dictionary); - __ mov(result, Operand(0)); + // Clamp the value to [0..255]. + __ Usat(untagged_value, 8, Operand(untagged_value)); + // Get the pointer to the external array. This clobbers elements. + __ ldr(external_pointer, + FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); + __ strb(untagged_value, MemOperand(external_pointer, untagged_key)); __ Ret(); } diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 7427351308..e0d05a3b8d 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -38,120 +38,202 @@ namespace internal { // TranscendentalCache runtime function. class TranscendentalCacheStub: public CodeStub { public: - enum ArgumentType { - TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, - UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits - }; - - TranscendentalCacheStub(TranscendentalCache::Type type, - ArgumentType argument_type) - : type_(type), argument_type_(argument_type) { } + explicit TranscendentalCacheStub(TranscendentalCache::Type type) + : type_(type) {} void Generate(MacroAssembler* masm); private: TranscendentalCache::Type type_; - ArgumentType argument_type_; - void GenerateCallCFunction(MacroAssembler* masm, Register scratch); - Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_ | argument_type_; } + int MinorKey() { return type_; } Runtime::FunctionId RuntimeFunction(); }; -class UnaryOpStub: public CodeStub { +class ToBooleanStub: public CodeStub { + public: + explicit ToBooleanStub(Register tos) : tos_(tos) { } + + void Generate(MacroAssembler* masm); + + private: + Register tos_; + Major MajorKey() { return ToBoolean; } + int MinorKey() { return tos_.code(); } +}; + + +class GenericBinaryOpStub : public CodeStub { public: - UnaryOpStub(Token::Value op, - UnaryOverwriteMode mode, - UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) + static const int kUnknownIntValue = -1; + + GenericBinaryOpStub(Token::Value op, + OverwriteMode mode, + Register lhs, + Register rhs, + int constant_rhs = kUnknownIntValue) : op_(op), mode_(mode), - operand_type_(operand_type), - name_(NULL) { - } + lhs_(lhs), + rhs_(rhs), + constant_rhs_(constant_rhs), + specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)), + runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI), + name_(NULL) { } + + GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + lhs_(LhsRegister(RegisterBits::decode(key))), + rhs_(RhsRegister(RegisterBits::decode(key))), + constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))), + specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)), + runtime_operands_type_(type_info), + name_(NULL) { } private: Token::Value op_; - UnaryOverwriteMode mode_; + OverwriteMode mode_; + Register lhs_; + Register rhs_; + int constant_rhs_; + bool specialized_on_rhs_; + BinaryOpIC::TypeInfo runtime_operands_type_; + char* name_; - // Operand type information determined at runtime. 
- UnaryOpIC::TypeInfo operand_type_; + static const int kMaxKnownRhs = 0x40000000; + static const int kKnownRhsKeyBits = 6; - char* name_; + // Minor key encoding in 17 bits. + class ModeBits: public BitField<OverwriteMode, 0, 2> {}; + class OpBits: public BitField<Token::Value, 2, 6> {}; + class TypeInfoBits: public BitField<int, 8, 3> {}; + class RegisterBits: public BitField<bool, 11, 1> {}; + class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {}; - const char* GetName(); + Major MajorKey() { return GenericBinaryOp; } + int MinorKey() { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + // Encode the parameters in a unique 18 bit value. + return OpBits::encode(op_) + | ModeBits::encode(mode_) + | KnownIntBits::encode(MinorKeyForKnownInt()) + | TypeInfoBits::encode(runtime_operands_type_) + | RegisterBits::encode(lhs_.is(r0)); + } -#ifdef DEBUG - void Print() { - PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - UnaryOpIC::GetName(operand_type_)); + void Generate(MacroAssembler* masm); + void HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs); + void HandleBinaryOpSlowCases(MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin); + void GenerateTypeTransition(MacroAssembler* masm); + + static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { + if (constant_rhs == kUnknownIntValue) return false; + if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3; + if (op == Token::MOD) { + if (constant_rhs <= 1) return false; + if (constant_rhs <= 10) return true; + if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true; + return false; + } + return false; } -#endif - class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {}; - class OpBits: public BitField<Token::Value, 1, 7> {}; - class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {}; + int MinorKeyForKnownInt() { + if (!specialized_on_rhs_) return 0; + if (constant_rhs_ <= 10) return constant_rhs_ + 1; + ASSERT(IsPowerOf2(constant_rhs_)); + int key = 12; + int d = constant_rhs_; + while ((d & 1) == 0) { + key++; + d >>= 1; + } + ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits)); + return key; + } - Major MajorKey() { return UnaryOp; } - int MinorKey() { - return ModeBits::encode(mode_) - | OpBits::encode(op_) - | OperandTypeInfoBits::encode(operand_type_); + int KnownBitsForMinorKey(int key) { + if (!key) return 0; + if (key <= 11) return key - 1; + int d = 1; + while (key != 12) { + key--; + d <<= 1; + } + return d; } - // Note: A lot of the helper functions below will vanish when we use virtual - // function instead of switch more often. - void Generate(MacroAssembler* masm); + Register LhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r0 : r1; + } - void GenerateTypeTransition(MacroAssembler* masm); + Register RhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? 
r1 : r0; + } - void GenerateSmiStub(MacroAssembler* masm); - void GenerateSmiStubSub(MacroAssembler* masm); - void GenerateSmiStubBitNot(MacroAssembler* masm); - void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); - void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); + bool HasSmiSmiFastPath() { + return op_ != Token::DIV; + } - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateHeapNumberStubSub(MacroAssembler* masm); - void GenerateHeapNumberStubBitNot(MacroAssembler* masm); - void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); - void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); + bool ShouldGenerateSmiCode() { + return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } - void GenerateGenericStub(MacroAssembler* masm); - void GenerateGenericStubSub(MacroAssembler* masm); - void GenerateGenericStubBitNot(MacroAssembler* masm); - void GenerateGenericCodeFallback(MacroAssembler* masm); + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } - virtual int GetCodeKind() { return Code::UNARY_OP_IC; } + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } virtual InlineCacheState GetICState() { - return UnaryOpIC::ToState(operand_type_); + return BinaryOpIC::ToState(runtime_operands_type_); } + const char* GetName(); + virtual void FinishCode(Code* code) { - code->set_unary_op_type(operand_type_); + code->set_binary_op_type(runtime_operands_type_); } + +#ifdef DEBUG + void Print() { + if (!specialized_on_rhs_) { + PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); + } else { + PrintF("GenericBinaryOpStub (%s by %d)\n", + Token::String(op_), + constant_rhs_); + } + } +#endif }; -class BinaryOpStub: public CodeStub { +class TypeRecordingBinaryOpStub: public CodeStub { public: - BinaryOpStub(Token::Value op, OverwriteMode mode) + TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode) : op_(op), mode_(mode), - operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED), + operands_type_(TRBinaryOpIC::UNINITIALIZED), + result_type_(TRBinaryOpIC::UNINITIALIZED), name_(NULL) { use_vfp3_ = CpuFeatures::IsSupported(VFP3); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } - BinaryOpStub( + TypeRecordingBinaryOpStub( int key, - BinaryOpIC::TypeInfo operands_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) + TRBinaryOpIC::TypeInfo operands_type, + TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED) : op_(OpBits::decode(key)), mode_(ModeBits::decode(key)), use_vfp3_(VFP3Bits::decode(key)), @@ -170,8 +252,8 @@ class BinaryOpStub: public CodeStub { bool use_vfp3_; // Operand type information determined at runtime. 
- BinaryOpIC::TypeInfo operands_type_; - BinaryOpIC::TypeInfo result_type_; + TRBinaryOpIC::TypeInfo operands_type_; + TRBinaryOpIC::TypeInfo result_type_; char* name_; @@ -179,12 +261,12 @@ class BinaryOpStub: public CodeStub { #ifdef DEBUG void Print() { - PrintF("BinaryOpStub %d (op %s), " + PrintF("TypeRecordingBinaryOpStub %d (op %s), " "(mode %d, runtime_type_info %s)\n", MinorKey(), Token::String(op_), static_cast<int>(mode_), - BinaryOpIC::GetName(operands_type_)); + TRBinaryOpIC::GetName(operands_type_)); } #endif @@ -192,10 +274,10 @@ class BinaryOpStub: public CodeStub { class ModeBits: public BitField<OverwriteMode, 0, 2> {}; class OpBits: public BitField<Token::Value, 2, 7> {}; class VFP3Bits: public BitField<bool, 9, 1> {}; - class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; - class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; + class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {}; + class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {}; - Major MajorKey() { return BinaryOp; } + Major MajorKey() { return TypeRecordingBinaryOp; } int MinorKey() { return OpBits::encode(op_) | ModeBits::encode(mode_) @@ -212,7 +294,6 @@ class BinaryOpStub: public CodeStub { Label* not_numbers, Label* gc_required); void GenerateSmiCode(MacroAssembler* masm, - Label* use_runtime, Label* gc_required, SmiCodeGenerateHeapNumberResults heapnumber_results); void GenerateLoadArguments(MacroAssembler* masm); @@ -223,7 +304,6 @@ class BinaryOpStub: public CodeStub { void GenerateHeapNumberStub(MacroAssembler* masm); void GenerateOddballStub(MacroAssembler* masm); void GenerateStringStub(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); void GenerateGenericStub(MacroAssembler* masm); void GenerateAddStrings(MacroAssembler* masm); void GenerateCallRuntime(MacroAssembler* masm); @@ -238,15 +318,15 @@ class BinaryOpStub: public CodeStub { void GenerateTypeTransition(MacroAssembler* masm); void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; } virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(operands_type_); + return TRBinaryOpIC::ToState(operands_type_); } virtual void FinishCode(Code* code) { - code->set_binary_op_type(operands_type_); - code->set_binary_op_result_type(result_type_); + code->set_type_recording_binary_op_type(operands_type_); + code->set_type_recording_binary_op_result_type(result_type_); } friend class CodeGenerator; @@ -306,7 +386,8 @@ class StringCompareStub: public CodeStub { public: StringCompareStub() { } - // Compares two flat ASCII strings and returns result in r0. + // Compare two flat ASCII strings and returns result in r0. + // Does not use the stack. static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, Register left, Register right, @@ -315,27 +396,107 @@ class StringCompareStub: public CodeStub { Register scratch3, Register scratch4); - // Compares two flat ASCII strings for equality and returns result - // in r0. - static void GenerateFlatAsciiStringEquals(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3); + private: + Major MajorKey() { return StringCompare; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +// This stub can do a fast mod operation without using fp. 
+// It is tail called from the GenericBinaryOpStub and it always +// returns an answer. It never causes GC so it doesn't need a real frame. +// +// The inputs are always positive Smis. This is never called +// where the denominator is a power of 2. We handle that separately. +// +// If we consider the denominator as an odd number multiplied by a power of 2, +// then: +// * The exponent (power of 2) is in the shift_distance register. +// * The odd number is in the odd_number register. It is always in the range +// of 3 to 25. +// * The bits from the numerator that are to be copied to the answer (there are +// shift_distance of them) are in the mask_bits register. +// * The other bits of the numerator have been shifted down and are in the lhs +// register. +class IntegerModStub : public CodeStub { + public: + IntegerModStub(Register result, + Register shift_distance, + Register odd_number, + Register mask_bits, + Register lhs, + Register scratch) + : result_(result), + shift_distance_(shift_distance), + odd_number_(odd_number), + mask_bits_(mask_bits), + lhs_(lhs), + scratch_(scratch) { + // We don't code these in the minor key, so they should always be the same. + // We don't really want to fix that since this stub is rather large and we + // don't want many copies of it. + ASSERT(shift_distance_.is(r9)); + ASSERT(odd_number_.is(r4)); + ASSERT(mask_bits_.is(r3)); + ASSERT(scratch_.is(r5)); + } private: - virtual Major MajorKey() { return StringCompare; } - virtual int MinorKey() { return 0; } - virtual void Generate(MacroAssembler* masm); - - static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, - Register left, - Register right, - Register length, - Register scratch1, - Register scratch2, - Label* chars_not_equal); + Register result_; + Register shift_distance_; + Register odd_number_; + Register mask_bits_; + Register lhs_; + Register scratch_; + + // Minor key encoding in 16 bits. + class ResultRegisterBits: public BitField<int, 0, 4> {}; + class LhsRegisterBits: public BitField<int, 4, 4> {}; + + Major MajorKey() { return IntegerMod; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return ResultRegisterBits::encode(result_.code()) + | LhsRegisterBits::encode(lhs_.code()); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "IntegerModStub"; } + + // Utility functions. 
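// Aside: a minimal C++ check of the arithmetic identity the IntegerModStub
// comment above relies on.  For a positive numerator n and a denominator
// d = odd << k (an odd number times a power of two), the remainder is the low
// k bits of n combined with (n >> k) % odd shifted back up by k.  This sketch
// uses an ordinary % to reduce by the odd factor; the stub itself avoids
// division by using the digit-sum helpers declared above.
#include <assert.h>
#include <stdint.h>

static uint32_t ModOddTimesPowerOfTwo(uint32_t n, uint32_t odd, int k) {
  uint32_t mask_bits = n & ((1u << k) - 1);  // bits copied straight to the answer
  uint32_t shifted_down = n >> k;            // the part reduced modulo 'odd'
  return ((shifted_down % odd) << k) | mask_bits;
}

static void CheckModIdentity() {
  for (uint32_t n = 0; n < 100000; n++) {
    assert(ModOddTimesPowerOfTwo(n, 25, 3) == n % (25u << 3));
  }
}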
+ void DigitSum(MacroAssembler* masm, + Register lhs, + int mask, + int shift, + Label* entry); + void DigitSum(MacroAssembler* masm, + Register lhs, + Register scratch, + int mask, + int shift1, + int shift2, + Label* entry); + void ModGetInRangeBySubtraction(MacroAssembler* masm, + Register lhs, + int shift, + int rhs); + void ModReduce(MacroAssembler* masm, + Register lhs, + int max, + int denominator); + void ModAnswer(MacroAssembler* masm, + Register result, + Register shift_distance, + Register mask_bits, + Register sum_of_digits); + + +#ifdef DEBUG + void Print() { PrintF("IntegerModStub\n"); } +#endif }; @@ -419,9 +580,6 @@ class RegExpCEntryStub: public CodeStub { private: Major MajorKey() { return RegExpCEntry; } int MinorKey() { return 0; } - - bool NeedsImmovableCode() { return true; } - const char* GetName() { return "RegExpCEntryStub"; } }; @@ -441,210 +599,59 @@ class DirectCEntryStub: public CodeStub { private: Major MajorKey() { return DirectCEntry; } int MinorKey() { return 0; } - - bool NeedsImmovableCode() { return true; } - const char* GetName() { return "DirectCEntryStub"; } }; -class FloatingPointHelper : public AllStatic { - public: - enum Destination { - kVFPRegisters, - kCoreRegisters - }; - - - // Loads smis from r0 and r1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is - // floating point registers VFP3 must be supported. If core registers are - // requested when VFP3 is supported d6 and d7 will be scratched. - static void LoadSmis(MacroAssembler* masm, - Destination destination, - Register scratch1, - Register scratch2); - - // Loads objects from r0 and r1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is - // floating point registers VFP3 must be supported. If core registers are - // requested when VFP3 is supported d6 and d7 will still be scratched. If - // either r0 or r1 is not a number (not smi and not heap number object) the - // not_number label is jumped to with r0 and r1 intact. - static void LoadOperands(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); - - // Convert the smi or heap number in object to an int32 using the rules - // for ToInt32 as described in ECMAScript 9.5.: the value is truncated - // and brought into the range -2^31 .. +2^31 - 1. - static void ConvertNumberToInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch, - Label* not_int32); - - // Converts the integer (untagged smi) in |int_scratch| to a double, storing - // the result either in |double_dst| or |dst2:dst1|, depending on - // |destination|. - // Warning: The value in |int_scratch| will be changed in the process! - static void ConvertIntToDouble(MacroAssembler* masm, - Register int_scratch, - Destination destination, - DwVfpRegister double_dst, - Register dst1, - Register dst2, - Register scratch2, - SwVfpRegister single_scratch); - - // Load the number from object into double_dst in the double format. - // Control will jump to not_int32 if the value cannot be exactly represented - // by a 32-bit integer. 
- // Floating point value in the 32-bit integer range that are not exact integer - // won't be loaded. - static void LoadNumberAsInt32Double(MacroAssembler* masm, - Register object, - Destination destination, - DwVfpRegister double_dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - SwVfpRegister single_scratch, - Label* not_int32); - - // Loads the number from object into dst as a 32-bit integer. - // Control will jump to not_int32 if the object cannot be exactly represented - // by a 32-bit integer. - // Floating point value in the 32-bit integer range that are not exact integer - // won't be converted. - // scratch3 is not used when VFP3 is supported. - static void LoadNumberAsInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, +// Generate code to load an element from a pixel array. The receiver is assumed +// to not be a smi and to have elements, the caller must guarantee this +// precondition. If key is not a smi, then the generated code branches to +// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi +// check has already been performed on key so that the smi check is not +// generated. If key is not a valid index within the bounds of the pixel array, +// the generated code jumps to out_of_range. receiver, key and elements are +// unchanged throughout the generated code sequence. +void GenerateFastPixelArrayLoad(MacroAssembler* masm, + Register receiver, + Register key, + Register elements_map, + Register elements, Register scratch1, Register scratch2, - Register scratch3, - DwVfpRegister double_scratch, - Label* not_int32); - - // Generate non VFP3 code to check if a double can be exactly represented by a - // 32-bit integer. This does not check for 0 or -0, which need - // to be checked for separately. - // Control jumps to not_int32 if the value is not a 32-bit integer, and falls - // through otherwise. - // src1 and src2 will be cloberred. - // - // Expected input: - // - src1: higher (exponent) part of the double value. - // - src2: lower (mantissa) part of the double value. - // Output status: - // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) - // - src2: contains 1. - // - other registers are clobbered. - static void DoubleIs32BitInteger(MacroAssembler* masm, - Register src1, - Register src2, - Register dst, - Register scratch, - Label* not_int32); - - // Generates code to call a C function to do a double operation using core - // registers. (Used when VFP3 is not supported.) - // This code never falls through, but returns with a heap number containing - // the result in r0. - // Register heapnumber_result must be a heap number in which the - // result of the operation will be stored. - // Requires the following layout on entry: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). 
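// Aside: a minimal C++ sketch of the two-register double layout the removed
// FloatingPointHelper comments above refer to.  On 32-bit ARM without VFP a
// double travels in two core registers: the low word holds mantissa bits
// 31..0 and the high word holds the sign bit, the 11-bit exponent and mantissa
// bits 51..32.  A little-endian in-memory layout is assumed here.
#include <stdint.h>
#include <string.h>

struct DoubleWords {
  uint32_t lo;  // least significant part of the mantissa (e.g. r0 / r2)
  uint32_t hi;  // sign, exponent and top of the mantissa (e.g. r1 / r3)
};

static DoubleWords SplitDouble(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // reinterpret without aliasing issues
  DoubleWords words;
  words.lo = static_cast<uint32_t>(bits);        // mantissa[31:0]
  words.hi = static_cast<uint32_t>(bits >> 32);  // sign | exponent | mantissa[51:32]
  return words;
}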
- static void CallCCodeForDoubleOperation(MacroAssembler* masm, - Token::Value op, - Register heap_number_result, - Register scratch); - - private: - static void LoadNumber(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register object, - DwVfpRegister dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); -}; - - -class StringDictionaryLookupStub: public CodeStub { - public: - enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - - explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { } - - void Generate(MacroAssembler* masm); - - MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup( - MacroAssembler* masm, - Label* miss, - Label* done, - Register receiver, - Register properties, - String* name, - Register scratch0); - - static void GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register r0, - Register r1); - - private: - static const int kInlinedProbes = 4; - static const int kTotalProbes = 20; - - static const int kCapacityOffset = - StringDictionary::kHeaderSize + - StringDictionary::kCapacityIndex * kPointerSize; - - static const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; - - -#ifdef DEBUG - void Print() { - PrintF("StringDictionaryLookupStub\n"); - } -#endif - - Major MajorKey() { return StringDictionaryNegativeLookup; } - - int MinorKey() { - return LookupModeBits::encode(mode_); - } - - class LookupModeBits: public BitField<LookupMode, 0, 1> {}; - - LookupMode mode_; -}; - + Register result, + Label* not_pixel_array, + Label* key_not_smi, + Label* out_of_range); + +// Generate code to store an element into a pixel array, clamping values between +// [0..255]. The receiver is assumed to not be a smi and to have elements, the +// caller must guarantee this precondition. If key is not a smi, then the +// generated code branches to key_not_smi. Callers can specify NULL for +// key_not_smi to signal that a smi check has already been performed on key so +// that the smi check is not generated. If value is not a smi, the generated +// code will branch to value_not_smi. If the receiver doesn't have pixel array +// elements, the generated code will branch to not_pixel_array, unless +// not_pixel_array is NULL, in which case the caller must ensure that the +// receiver has pixel array elements. If key is not a valid index within the +// bounds of the pixel array, the generated code jumps to out_of_range. If +// load_elements_from_receiver is true, then the elements of receiver is loaded +// into elements, otherwise elements is assumed to already be the receiver's +// elements. If load_elements_map_from_elements is true, elements_map is loaded +// from elements, otherwise it is assumed to already contain the element map. 
+void GenerateFastPixelArrayStore(MacroAssembler* masm, + Register receiver, + Register key, + Register value, + Register elements, + Register elements_map, + Register scratch1, + Register scratch2, + bool load_elements_from_receiver, + bool load_elements_map_from_elements, + Label* key_not_smi, + Label* value_not_smi, + Label* not_pixel_array, + Label* out_of_range); } } // namespace v8::internal diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h new file mode 100644 index 0000000000..81ed2d043b --- /dev/null +++ b/deps/v8/src/arm/codegen-arm-inl.h @@ -0,0 +1,48 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#ifndef V8_ARM_CODEGEN_ARM_INL_H_ +#define V8_ARM_CODEGEN_ARM_INL_H_ + +#include "virtual-frame-arm.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm_) + +// Platform-specific inline functions. + +void DeferredCode::Jump() { __ jmp(&entry_label_); } +void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); } + +#undef __ + +} } // namespace v8::internal + +#endif // V8_ARM_CODEGEN_ARM_INL_H_ diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index bf748a9b6a..1cd86d1da1 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -29,14 +29,56 @@ #if defined(V8_TARGET_ARCH_ARM) -#include "codegen.h" +#include "bootstrapper.h" +#include "code-stubs.h" +#include "codegen-inl.h" +#include "compiler.h" +#include "debug.h" +#include "ic-inl.h" +#include "jsregexp.h" +#include "jump-target-inl.h" +#include "parser.h" +#include "regexp-macro-assembler.h" +#include "regexp-stack.h" +#include "register-allocator-inl.h" +#include "runtime.h" +#include "scopes.h" +#include "stub-cache.h" +#include "virtual-frame-inl.h" +#include "virtual-frame-arm-inl.h" namespace v8 { namespace internal { + +#define __ ACCESS_MASM(masm_) + +// ------------------------------------------------------------------------- +// Platform-specific DeferredCode functions. + +void DeferredCode::SaveRegisters() { + // On ARM you either have a completely spilled frame or you + // handle it yourself, but at the moment there's no automation + // of registers and deferred code. +} + + +void DeferredCode::RestoreRegisters() { +} + + // ------------------------------------------------------------------------- // Platform-specific RuntimeCallHelper functions. +void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { + frame_state_->frame()->AssertIsSpilled(); +} + + +void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { +} + + void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { masm->EnterInternalFrame(); } @@ -47,6 +89,7320 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { } +// ------------------------------------------------------------------------- +// CodeGenState implementation. + +CodeGenState::CodeGenState(CodeGenerator* owner) + : owner_(owner), + previous_(owner->state()) { + owner->set_state(this); +} + + +ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner, + JumpTarget* true_target, + JumpTarget* false_target) + : CodeGenState(owner), + true_target_(true_target), + false_target_(false_target) { + owner->set_state(this); +} + + +TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner, + Slot* slot, + TypeInfo type_info) + : CodeGenState(owner), + slot_(slot) { + owner->set_state(this); + old_type_info_ = owner->set_type_info(slot, type_info); +} + + +CodeGenState::~CodeGenState() { + ASSERT(owner_->state() == this); + owner_->set_state(previous_); +} + + +TypeInfoCodeGenState::~TypeInfoCodeGenState() { + owner()->set_type_info(slot_, old_type_info_); +} + +// ------------------------------------------------------------------------- +// CodeGenerator implementation + +int CodeGenerator::inlined_write_barrier_size_ = -1; + +CodeGenerator::CodeGenerator(MacroAssembler* masm) + : deferred_(8), + masm_(masm), + info_(NULL), + frame_(NULL), + allocator_(NULL), + cc_reg_(al), + state_(NULL), + loop_nesting_(0), + type_info_(NULL), + function_return_(JumpTarget::BIDIRECTIONAL), + function_return_is_shadowed_(false) { +} + + +// Calling conventions: +// fp: caller's frame pointer +// sp: stack pointer +// r1: called JS function +// cp: callee's context + +void CodeGenerator::Generate(CompilationInfo* info) { + // Record the position for debugging purposes. + CodeForFunctionPosition(info->function()); + Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); + + // Initialize state. 
+ info_ = info; + + int slots = scope()->num_parameters() + scope()->num_stack_slots(); + ScopedVector<TypeInfo> type_info_array(slots); + for (int i = 0; i < slots; i++) { + type_info_array[i] = TypeInfo::Unknown(); + } + type_info_ = &type_info_array; + + ASSERT(allocator_ == NULL); + RegisterAllocator register_allocator(this); + allocator_ = ®ister_allocator; + ASSERT(frame_ == NULL); + frame_ = new VirtualFrame(); + cc_reg_ = al; + + // Adjust for function-level loop nesting. + ASSERT_EQ(0, loop_nesting_); + loop_nesting_ = info->is_in_loop() ? 1 : 0; + + { + CodeGenState state(this); + + // Entry: + // Stack: receiver, arguments + // lr: return address + // fp: caller's frame pointer + // sp: stack pointer + // r1: called JS function + // cp: callee's context + allocator_->Initialize(); + +#ifdef DEBUG + if (strlen(FLAG_stop_at) > 0 && + info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { + frame_->SpillAll(); + __ stop("stop-at"); + } +#endif + + frame_->Enter(); + // tos: code slot + + // Allocate space for locals and initialize them. This also checks + // for stack overflow. + frame_->AllocateStackSlots(); + + frame_->AssertIsSpilled(); + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + // Allocate local context. + // Get outer context and create a new context based on it. + __ ldr(r0, frame_->Function()); + frame_->EmitPush(r0); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + frame_->CallStub(&stub, 1); + } else { + frame_->CallRuntime(Runtime::kNewContext, 1); + } + +#ifdef DEBUG + JumpTarget verified_true; + __ cmp(r0, cp); + verified_true.Branch(eq); + __ stop("NewContext: r0 is expected to be the same as cp"); + verified_true.Bind(); +#endif + // Update context local. + __ str(cp, frame_->Context()); + } + + // TODO(1241774): Improve this code: + // 1) only needed if we have a context + // 2) no need to recompute context ptr every single time + // 3) don't copy parameter operand code from SlotOperand! + { + Comment cmnt2(masm_, "[ copy context parameters into .context"); + // Note that iteration order is relevant here! If we have the same + // parameter twice (e.g., function (x, y, x)), and that parameter + // needs to be copied into the context, it must be the last argument + // passed to the parameter that needs to be copied. This is a rare + // case so we don't check for it, instead we rely on the copying + // order: such a parameter is copied repeatedly into the same + // context location and thus the last value is what is seen inside + // the function. + frame_->AssertIsSpilled(); + for (int i = 0; i < scope()->num_parameters(); i++) { + Variable* par = scope()->parameter(i); + Slot* slot = par->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + ASSERT(!scope()->is_global_scope()); // No params in global scope. + __ ldr(r1, frame_->ParameterAt(i)); + // Loads r2 with context; used below in RecordWrite. + __ str(r1, SlotOperand(slot, r2)); + // Load the offset into r3. + int slot_offset = + FixedArray::kHeaderSize + slot->index() * kPointerSize; + __ RecordWrite(r2, Operand(slot_offset), r3, r1); + } + } + } + + // Store the arguments object. This must happen after context + // initialization because the arguments object may be stored in + // the context. + if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) { + StoreArgumentsObject(true); + } + + // Initialize ThisFunction reference if present. 
+ if (scope()->is_function_scope() && scope()->function() != NULL) { + frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex); + StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT); + } + + // Initialize the function return target after the locals are set + // up, because it needs the expected frame height from the frame. + function_return_.SetExpectedHeight(); + function_return_is_shadowed_ = false; + + // Generate code to 'execute' declarations and initialize functions + // (source elements). In case of an illegal redeclaration we need to + // handle that instead of processing the declarations. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ illegal redeclarations"); + scope()->VisitIllegalRedeclaration(this); + } else { + Comment cmnt(masm_, "[ declarations"); + ProcessDeclarations(scope()->declarations()); + // Bail out if a stack-overflow exception occurred when processing + // declarations. + if (HasStackOverflow()) return; + } + + if (FLAG_trace) { + frame_->CallRuntime(Runtime::kTraceEnter, 0); + // Ignore the return value. + } + + // Compile the body of the function in a vanilla state. Don't + // bother compiling all the code if the scope has an illegal + // redeclaration. + if (!scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ function body"); +#ifdef DEBUG + bool is_builtin = Bootstrapper::IsActive(); + bool should_trace = + is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls; + if (should_trace) { + frame_->CallRuntime(Runtime::kDebugTrace, 0); + // Ignore the return value. + } +#endif + VisitStatements(info->function()->body()); + } + } + + // Handle the return from the function. + if (has_valid_frame()) { + // If there is a valid frame, control flow can fall off the end of + // the body. In that case there is an implicit return statement. + ASSERT(!function_return_is_shadowed_); + frame_->PrepareForReturn(); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + if (function_return_.is_bound()) { + function_return_.Jump(); + } else { + function_return_.Bind(); + GenerateReturnSequence(); + } + } else if (function_return_.is_linked()) { + // If the return target has dangling jumps to it, then we have not + // yet generated the return sequence. This can happen when (a) + // control does not flow off the end of the body so we did not + // compile an artificial return statement just above, and (b) there + // are return statements in the body but (c) they are all shadowed. + function_return_.Bind(); + GenerateReturnSequence(); + } + + // Adjust for function-level loop nesting. + ASSERT(loop_nesting_ == info->is_in_loop()? 1 : 0); + loop_nesting_ = 0; + + // Code generation state must be reset. + ASSERT(!has_cc()); + ASSERT(state_ == NULL); + ASSERT(loop_nesting() == 0); + ASSERT(!function_return_is_shadowed_); + function_return_.Unuse(); + DeleteFrame(); + + // Process any deferred code using the register allocator. 
+ if (!HasStackOverflow()) { + ProcessDeferred(); + } + + allocator_ = NULL; + type_info_ = NULL; +} + + +int CodeGenerator::NumberOfSlot(Slot* slot) { + if (slot == NULL) return kInvalidSlotNumber; + switch (slot->type()) { + case Slot::PARAMETER: + return slot->index(); + case Slot::LOCAL: + return slot->index() + scope()->num_parameters(); + default: + break; + } + return kInvalidSlotNumber; +} + + +MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { + // Currently, this assertion will fail if we try to assign to + // a constant variable that is constant because it is read-only + // (such as the variable referring to a named function expression). + // We need to implement assignments to read-only variables. + // Ideally, we should do this during AST generation (by converting + // such assignments into expression statements); however, in general + // we may not be able to make the decision until past AST generation, + // that is when the entire program is known. + ASSERT(slot != NULL); + int index = slot->index(); + switch (slot->type()) { + case Slot::PARAMETER: + return frame_->ParameterAt(index); + + case Slot::LOCAL: + return frame_->LocalAt(index); + + case Slot::CONTEXT: { + // Follow the context chain if necessary. + ASSERT(!tmp.is(cp)); // do not overwrite context register + Register context = cp; + int chain_length = scope()->ContextChainLength(slot->var()->scope()); + for (int i = 0; i < chain_length; i++) { + // Load the closure. + // (All contexts, even 'with' contexts, have a closure, + // and it is the same for all contexts inside a function. + // There is no need to go to the function context first.) + __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); + // Load the function context (which is the incoming, outer context). + __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); + context = tmp; + } + // We may have a 'with' context now. Get the function context. + // (In fact this mov may never be the needed, since the scope analysis + // may not permit a direct context access in this case and thus we are + // always at a function context. However it is safe to dereference be- + // cause the function context of a function context is itself. Before + // deleting this mov we should try to create a counter-example first, + // though...) + __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(tmp, index); + } + + default: + UNREACHABLE(); + return MemOperand(r0, 0); + } +} + + +MemOperand CodeGenerator::ContextSlotOperandCheckExtensions( + Slot* slot, + Register tmp, + Register tmp2, + JumpTarget* slow) { + ASSERT(slot->type() == Slot::CONTEXT); + Register context = cp; + + for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); + __ tst(tmp2, tmp2); + slow->Branch(ne); + } + __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); + __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); + context = tmp; + } + } + // Check that last extension is NULL. + __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); + __ tst(tmp2, tmp2); + slow->Branch(ne); + __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(tmp, slot->index()); +} + + +// Loads a value on TOS. 
If it is a boolean value, the result may have been +// (partially) translated into branches, or it may have set the condition +// code register. If force_cc is set, the value is forced to set the +// condition code register and no value is pushed. If the condition code +// register was set, has_cc() is true and cc_reg_ contains the condition to +// test for 'true'. +void CodeGenerator::LoadCondition(Expression* x, + JumpTarget* true_target, + JumpTarget* false_target, + bool force_cc) { + ASSERT(!has_cc()); + int original_height = frame_->height(); + + { ConditionCodeGenState new_state(this, true_target, false_target); + Visit(x); + + // If we hit a stack overflow, we may not have actually visited + // the expression. In that case, we ensure that we have a + // valid-looking frame state because we will continue to generate + // code as we unwind the C++ stack. + // + // It's possible to have both a stack overflow and a valid frame + // state (eg, a subexpression overflowed, visiting it returned + // with a dummied frame state, and visiting this expression + // returned with a normal-looking state). + if (HasStackOverflow() && + has_valid_frame() && + !has_cc() && + frame_->height() == original_height) { + true_target->Jump(); + } + } + if (force_cc && frame_ != NULL && !has_cc()) { + // Convert the TOS value to a boolean in the condition code register. + ToBoolean(true_target, false_target); + } + ASSERT(!force_cc || !has_valid_frame() || has_cc()); + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height) || + (!has_cc() && frame_->height() == original_height + 1)); +} + + +void CodeGenerator::Load(Expression* expr) { + // We generally assume that we are not in a spilled scope for most + // of the code generator. A failure to ensure this caused issue 815 + // and this assert is designed to catch similar issues. + frame_->AssertIsNotSpilled(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + JumpTarget true_target; + JumpTarget false_target; + LoadCondition(expr, &true_target, &false_target, false); + + if (has_cc()) { + // Convert cc_reg_ into a boolean value. + JumpTarget loaded; + JumpTarget materialize_true; + materialize_true.Branch(cc_reg_); + frame_->EmitPushRoot(Heap::kFalseValueRootIndex); + loaded.Jump(); + materialize_true.Bind(); + frame_->EmitPushRoot(Heap::kTrueValueRootIndex); + loaded.Bind(); + cc_reg_ = al; + } + + if (true_target.is_linked() || false_target.is_linked()) { + // We have at least one condition value that has been "translated" + // into a branch, thus it needs to be loaded explicitly. + JumpTarget loaded; + if (frame_ != NULL) { + loaded.Jump(); // Don't lose the current TOS. + } + bool both = true_target.is_linked() && false_target.is_linked(); + // Load "true" if necessary. + if (true_target.is_linked()) { + true_target.Bind(); + frame_->EmitPushRoot(Heap::kTrueValueRootIndex); + } + // If both "true" and "false" need to be loaded jump across the code for + // "false". + if (both) { + loaded.Jump(); + } + // Load "false" if necessary. + if (false_target.is_linked()) { + false_target.Bind(); + frame_->EmitPushRoot(Heap::kFalseValueRootIndex); + } + // A value is loaded on all paths reaching this point. 
+ loaded.Bind(); + } + ASSERT(has_valid_frame()); + ASSERT(!has_cc()); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::LoadGlobal() { + Register reg = frame_->GetTOSRegister(); + __ ldr(reg, GlobalObjectOperand()); + frame_->EmitPush(reg); +} + + +void CodeGenerator::LoadGlobalReceiver(Register scratch) { + Register reg = frame_->GetTOSRegister(); + __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ ldr(reg, + FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset)); + frame_->EmitPush(reg); +} + + +ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { + if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; + ASSERT(scope()->arguments_shadow() != NULL); + // We don't want to do lazy arguments allocation for functions that + // have heap-allocated contexts, because it interfers with the + // uninitialized const tracking in the context objects. + return (scope()->num_heap_slots() > 0) + ? EAGER_ARGUMENTS_ALLOCATION + : LAZY_ARGUMENTS_ALLOCATION; +} + + +void CodeGenerator::StoreArgumentsObject(bool initial) { + ArgumentsAllocationMode mode = ArgumentsMode(); + ASSERT(mode != NO_ARGUMENTS_ALLOCATION); + + Comment cmnt(masm_, "[ store arguments object"); + if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { + // When using lazy arguments allocation, we store the hole value + // as a sentinel indicating that the arguments object hasn't been + // allocated yet. + frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex); + } else { + frame_->SpillAll(); + ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); + __ ldr(r2, frame_->Function()); + // The receiver is below the arguments, the return address, and the + // frame pointer on the stack. + const int kReceiverDisplacement = 2 + scope()->num_parameters(); + __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize)); + __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters()))); + frame_->Adjust(3); + __ Push(r2, r1, r0); + frame_->CallStub(&stub, 3); + frame_->EmitPush(r0); + } + + Variable* arguments = scope()->arguments(); + Variable* shadow = scope()->arguments_shadow(); + ASSERT(arguments != NULL && arguments->AsSlot() != NULL); + ASSERT(shadow != NULL && shadow->AsSlot() != NULL); + JumpTarget done; + if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { + // We have to skip storing into the arguments slot if it has + // already been written to. This can happen if the a function + // has a local variable named 'arguments'. + LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF); + Register arguments = frame_->PopToRegister(); + __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex); + __ cmp(arguments, ip); + done.Branch(ne); + } + StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT); + if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); + StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT); +} + + +void CodeGenerator::LoadTypeofExpression(Expression* expr) { + // Special handling of identifiers as subexpressions of typeof. + Variable* variable = expr->AsVariableProxy()->AsVariable(); + if (variable != NULL && !variable->is_this() && variable->is_global()) { + // For a global variable we build the property reference + // <global>.<variable> and perform a (regular non-contextual) property + // load to make sure we do not get reference errors. 
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); + Literal key(variable->name()); + Property property(&global, &key, RelocInfo::kNoPosition); + Reference ref(this, &property); + ref.GetValue(); + } else if (variable != NULL && variable->AsSlot() != NULL) { + // For a variable that rewrites to a slot, we signal it is the immediate + // subexpression of a typeof. + LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF); + } else { + // Anything else can be handled normally. + Load(expr); + } +} + + +Reference::Reference(CodeGenerator* cgen, + Expression* expression, + bool persist_after_get) + : cgen_(cgen), + expression_(expression), + type_(ILLEGAL), + persist_after_get_(persist_after_get) { + // We generally assume that we are not in a spilled scope for most + // of the code generator. A failure to ensure this caused issue 815 + // and this assert is designed to catch similar issues. + cgen->frame()->AssertIsNotSpilled(); + cgen->LoadReference(this); +} + + +Reference::~Reference() { + ASSERT(is_unloaded() || is_illegal()); +} + + +void CodeGenerator::LoadReference(Reference* ref) { + Comment cmnt(masm_, "[ LoadReference"); + Expression* e = ref->expression(); + Property* property = e->AsProperty(); + Variable* var = e->AsVariableProxy()->AsVariable(); + + if (property != NULL) { + // The expression is either a property or a variable proxy that rewrites + // to a property. + Load(property->obj()); + if (property->key()->IsPropertyName()) { + ref->set_type(Reference::NAMED); + } else { + Load(property->key()); + ref->set_type(Reference::KEYED); + } + } else if (var != NULL) { + // The expression is a variable proxy that does not rewrite to a + // property. Global variables are treated as named property references. + if (var->is_global()) { + LoadGlobal(); + ref->set_type(Reference::NAMED); + } else { + ASSERT(var->AsSlot() != NULL); + ref->set_type(Reference::SLOT); + } + } else { + // Anything else is a runtime error. + Load(e); + frame_->CallRuntime(Runtime::kThrowReferenceError, 1); + } +} + + +void CodeGenerator::UnloadReference(Reference* ref) { + int size = ref->size(); + ref->set_unloaded(); + if (size == 0) return; + + // Pop a reference from the stack while preserving TOS. + VirtualFrame::RegisterAllocationScope scope(this); + Comment cmnt(masm_, "[ UnloadReference"); + if (size > 0) { + Register tos = frame_->PopToRegister(); + frame_->Drop(size); + frame_->EmitPush(tos); + } +} + + +// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given +// register to a boolean in the condition code register. The code +// may jump to 'false_target' in case the register converts to 'false'. +void CodeGenerator::ToBoolean(JumpTarget* true_target, + JumpTarget* false_target) { + // Note: The generated code snippet does not change stack variables. + // Only the condition code should be set. + bool known_smi = frame_->KnownSmiAt(0); + Register tos = frame_->PopToRegister(); + + // Fast case checks + + // Check if the value is 'false'. + if (!known_smi) { + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(tos, ip); + false_target->Branch(eq); + + // Check if the value is 'true'. + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(tos, ip); + true_target->Branch(eq); + + // Check if the value is 'undefined'. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(tos, ip); + false_target->Branch(eq); + } + + // Check if the value is a smi. 
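// Aside: a minimal C++ sketch of the fast-path decisions ToBoolean makes here
// (the root-value checks above and the smi checks that follow) before falling
// back to ToBooleanStub or Runtime::kToBool.  The representation assumed below
// (low bit clear == smi, matching kSmiTag == 0) and the HeapRoots struct are
// illustrative stand-ins, not the real V8 types.
#include <stdint.h>

struct HeapRoots {
  intptr_t true_value, false_value, undefined_value;  // tagged root pointers
};

// Returns 1 for true, 0 for false, -1 when the slow case is required.
static int FastToBoolean(intptr_t tos, const HeapRoots& roots) {
  if (tos == roots.false_value) return 0;      // 'false' is falsy
  if (tos == roots.true_value) return 1;       // 'true' is truthy
  if (tos == roots.undefined_value) return 0;  // 'undefined' is falsy
  if (tos == 0) return 0;                      // the smi 0 is falsy
  if ((tos & 1) == 0) return 1;                // any other smi is truthy
  return -1;  // heap object: fall back to the stub or the runtime call
}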
+ __ cmp(tos, Operand(Smi::FromInt(0))); + + if (!known_smi) { + false_target->Branch(eq); + __ tst(tos, Operand(kSmiTagMask)); + true_target->Branch(eq); + + // Slow case. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Implements the slow case by using ToBooleanStub. + // The ToBooleanStub takes a single argument, and + // returns a non-zero value for true, or zero for false. + // Both the argument value and the return value use the + // register assigned to tos_ + ToBooleanStub stub(tos); + frame_->CallStub(&stub, 0); + // Convert the result in "tos" to a condition code. + __ cmp(tos, Operand(0, RelocInfo::NONE)); + } else { + // Implements slow case by calling the runtime. + frame_->EmitPush(tos); + frame_->CallRuntime(Runtime::kToBool, 1); + // Convert the result (r0) to a condition code. + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(r0, ip); + } + } + + cc_reg_ = ne; +} + + +void CodeGenerator::GenericBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + GenerateInlineSmi inline_smi, + int constant_rhs) { + // top of virtual frame: y + // 2nd elt. on virtual frame : x + // result : top of virtual frame + + // Stub is entered with a call: 'return address' is in lr. + switch (op) { + case Token::ADD: + case Token::SUB: + if (inline_smi) { + JumpTarget done; + Register rhs = frame_->PopToRegister(); + Register lhs = frame_->PopToRegister(rhs); + Register scratch = VirtualFrame::scratch0(); + __ orr(scratch, rhs, Operand(lhs)); + // Check they are both small and positive. + __ tst(scratch, Operand(kSmiTagMask | 0xc0000000)); + ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now. + STATIC_ASSERT(kSmiTag == 0); + if (op == Token::ADD) { + __ add(r0, lhs, Operand(rhs), LeaveCC, eq); + } else { + __ sub(r0, lhs, Operand(rhs), LeaveCC, eq); + } + done.Branch(eq); + GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs); + frame_->SpillAll(); + frame_->CallStub(&stub, 0); + done.Bind(); + frame_->EmitPush(r0); + break; + } else { + // Fall through! + } + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + if (inline_smi) { + bool rhs_is_smi = frame_->KnownSmiAt(0); + bool lhs_is_smi = frame_->KnownSmiAt(1); + Register rhs = frame_->PopToRegister(); + Register lhs = frame_->PopToRegister(rhs); + Register smi_test_reg; + Condition cond; + if (!rhs_is_smi || !lhs_is_smi) { + if (rhs_is_smi) { + smi_test_reg = lhs; + } else if (lhs_is_smi) { + smi_test_reg = rhs; + } else { + smi_test_reg = VirtualFrame::scratch0(); + __ orr(smi_test_reg, rhs, Operand(lhs)); + } + // Check they are both Smis. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + cond = eq; + } else { + cond = al; + } + ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now. + if (op == Token::BIT_OR) { + __ orr(r0, lhs, Operand(rhs), LeaveCC, cond); + } else if (op == Token::BIT_AND) { + __ and_(r0, lhs, Operand(rhs), LeaveCC, cond); + } else { + ASSERT(op == Token::BIT_XOR); + STATIC_ASSERT(kSmiTag == 0); + __ eor(r0, lhs, Operand(rhs), LeaveCC, cond); + } + if (cond != al) { + JumpTarget done; + done.Branch(cond); + GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs); + frame_->SpillAll(); + frame_->CallStub(&stub, 0); + done.Bind(); + } + frame_->EmitPush(r0); + break; + } else { + // Fall through! + } + case Token::MUL: + case Token::DIV: + case Token::MOD: + case Token::SHL: + case Token::SHR: + case Token::SAR: { + Register rhs = frame_->PopToRegister(); + Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register. 
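The combined test above, OR the two operands together and AND with kSmiTagMask | 0xc0000000, checks three things at once: both values are smis (low bit clear) and both have their top two bits clear, so each tagged word is a non-negative value below 2^30 and the optimistic add or sub cannot overflow a 32-bit signed register. Roughly the same predicate in C++, assuming the 32-bit smi encoding:

#include <cassert>
#include <cstdint>

static const uint32_t kSmiTagMask = 1;

// True when both tagged words are positive smis small enough that their
// sum or difference cannot overflow 32 signed bits.
static inline bool BothSmallPositiveSmis(uint32_t lhs, uint32_t rhs) {
  return ((lhs | rhs) & (kSmiTagMask | 0xc0000000u)) == 0;
}

int main() {
  uint32_t a = 1000u << 1, b = 7u << 1;               // two small tagged smis
  assert(BothSmallPositiveSmis(a, b));
  assert(!BothSmallPositiveSmis(a, (5u << 1) | 1));    // not a smi (tag bit set)
  assert(!BothSmallPositiveSmis(a, 0x60000000u));      // too large, could overflow
  return 0;
}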
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs); + frame_->SpillAll(); + frame_->CallStub(&stub, 0); + frame_->EmitPush(r0); + break; + } + + case Token::COMMA: { + Register scratch = frame_->PopToRegister(); + // Simply discard left value. + frame_->Drop(); + frame_->EmitPush(scratch); + break; + } + + default: + // Other cases should have been handled before this point. + UNREACHABLE(); + break; + } +} + + +class DeferredInlineSmiOperation: public DeferredCode { + public: + DeferredInlineSmiOperation(Token::Value op, + int value, + bool reversed, + OverwriteMode overwrite_mode, + Register tos) + : op_(op), + value_(value), + reversed_(reversed), + overwrite_mode_(overwrite_mode), + tos_register_(tos) { + set_comment("[ DeferredInlinedSmiOperation"); + } + + virtual void Generate(); + // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and + // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty + // methods, it is the responsibility of the deferred code to save and restore + // registers. + virtual bool AutoSaveAndRestore() { return false; } + + void JumpToNonSmiInput(Condition cond); + void JumpToAnswerOutOfRange(Condition cond); + + private: + void GenerateNonSmiInput(); + void GenerateAnswerOutOfRange(); + void WriteNonSmiAnswer(Register answer, + Register heap_number, + Register scratch); + + Token::Value op_; + int value_; + bool reversed_; + OverwriteMode overwrite_mode_; + Register tos_register_; + Label non_smi_input_; + Label answer_out_of_range_; +}; + + +// For bit operations we try harder and handle the case where the input is not +// a Smi but a 32bits integer without calling the generic stub. +void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) { + ASSERT(Token::IsBitOp(op_)); + + __ b(cond, &non_smi_input_); +} + + +// For bit operations the result is always 32bits so we handle the case where +// the result does not fit in a Smi without calling the generic stub. +void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) { + ASSERT(Token::IsBitOp(op_)); + + if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) { + // >>> requires an unsigned to double conversion and the non VFP code + // does not support this conversion. + __ b(cond, entry_label()); + } else { + __ b(cond, &answer_out_of_range_); + } +} + + +// On entry the non-constant side of the binary operation is in tos_register_ +// and the constant smi side is nowhere. The tos_register_ is not used by the +// virtual frame. On exit the answer is in the tos_register_ and the virtual +// frame is unchanged. +void DeferredInlineSmiOperation::Generate() { + VirtualFrame copied_frame(*frame_state()->frame()); + copied_frame.SpillAll(); + + Register lhs = r1; + Register rhs = r0; + switch (op_) { + case Token::ADD: { + // Revert optimistic add. + if (reversed_) { + __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); + __ mov(r1, Operand(Smi::FromInt(value_))); + } else { + __ sub(r1, tos_register_, Operand(Smi::FromInt(value_))); + __ mov(r0, Operand(Smi::FromInt(value_))); + } + break; + } + + case Token::SUB: { + // Revert optimistic sub. + if (reversed_) { + __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_))); + __ mov(r1, Operand(Smi::FromInt(value_))); + } else { + __ add(r1, tos_register_, Operand(Smi::FromInt(value_))); + __ mov(r0, Operand(Smi::FromInt(value_))); + } + break; + } + + // For these operations there is no optimistic operation that needs to be + // reverted. 
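The "revert optimistic add" step above works because two's-complement arithmetic wraps: even if tos = a + C overflowed and the V flag sent us to the deferred code, computing tos - C there recovers the original a exactly, so the generic stub sees the untouched operands. A small demonstration using unsigned arithmetic, which wraps the same way the ARM registers do:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0x7ffffffeu;              // an operand near the signed maximum
  uint32_t c = 0x00000004u;              // the constant being added

  uint32_t optimistic = a + c;           // wraps past INT32_MAX; V flag would be set
  uint32_t recovered  = optimistic - c;  // the deferred code's "revert"

  assert(recovered == a);                // original operand restored exactly
  return 0;
}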
+ case Token::MUL: + case Token::MOD: + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SHL: + case Token::SHR: + case Token::SAR: { + if (tos_register_.is(r1)) { + __ mov(r0, Operand(Smi::FromInt(value_))); + } else { + ASSERT(tos_register_.is(r0)); + __ mov(r1, Operand(Smi::FromInt(value_))); + } + if (reversed_ == tos_register_.is(r1)) { + lhs = r0; + rhs = r1; + } + break; + } + + default: + // Other cases should have been handled before this point. + UNREACHABLE(); + break; + } + + GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_); + __ CallStub(&stub); + + // The generic stub returns its value in r0, but that's not + // necessarily what we want. We want whatever the inlined code + // expected, which is that the answer is in the same register as + // the operand was. + __ Move(tos_register_, r0); + + // The tos register was not in use for the virtual frame that we + // came into this function with, so we can merge back to that frame + // without trashing it. + copied_frame.MergeTo(frame_state()->frame()); + + Exit(); + + if (non_smi_input_.is_linked()) { + GenerateNonSmiInput(); + } + + if (answer_out_of_range_.is_linked()) { + GenerateAnswerOutOfRange(); + } +} + + +// Convert and write the integer answer into heap_number. +void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer, + Register heap_number, + Register scratch) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, answer); + if (op_ == Token::SHR) { + __ vcvt_f64_u32(d0, s0); + } else { + __ vcvt_f64_s32(d0, s0); + } + __ sub(scratch, heap_number, Operand(kHeapObjectTag)); + __ vstr(d0, scratch, HeapNumber::kValueOffset); + } else { + WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch); + __ CallStub(&stub); + } +} + + +void DeferredInlineSmiOperation::GenerateNonSmiInput() { + // We know the left hand side is not a Smi and the right hand side is an + // immediate value (value_) which can be represented as a Smi. We only + // handle bit operations. + ASSERT(Token::IsBitOp(op_)); + + if (FLAG_debug_code) { + __ Abort("Should not fall through!"); + } + + __ bind(&non_smi_input_); + if (FLAG_debug_code) { + __ AbortIfSmi(tos_register_); + } + + // This routine uses the registers from r2 to r6. At the moment they are + // not used by the register allocator, but when they are it should use + // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above. + + Register heap_number_map = r7; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset)); + __ cmp(r3, heap_number_map); + // Not a number, fall back to the GenericBinaryOpStub. + __ b(ne, entry_label()); + + Register int32 = r2; + // Not a 32bits signed int, fall back to the GenericBinaryOpStub. + __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label()); + + // tos_register_ (r0 or r1): Original heap number. + // int32: signed 32bits int. 
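The VFP path above chooses between vcvt_f64_u32 and vcvt_f64_s32 because a >>> (SHR) result has to be read as an unsigned 32-bit number while the other bit operations produce a signed one, and the same bit pattern converts to very different doubles. A short illustration of that distinction:

#include <cassert>
#include <cstdint>

// Convert a raw 32-bit result to double the way the answer should be seen:
// unsigned for SHR (JavaScript >>>), signed for the other bit operations.
static double ToDouble(uint32_t raw_bits, bool result_is_unsigned) {
  return result_is_unsigned ? static_cast<double>(raw_bits)
                            : static_cast<double>(static_cast<int32_t>(raw_bits));
}

int main() {
  uint32_t bits = 0x80000000u;                    // high bit set
  assert(ToDouble(bits, true)  == 2147483648.0);  // >>> view: 2^31
  assert(ToDouble(bits, false) == -2147483648.0); // signed view: -2^31
  return 0;
}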
+ + Label result_not_a_smi; + int shift_value = value_ & 0x1f; + switch (op_) { + case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break; + case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break; + case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break; + case Token::SAR: + ASSERT(!reversed_); + if (shift_value != 0) { + __ mov(int32, Operand(int32, ASR, shift_value)); + } + break; + case Token::SHR: + ASSERT(!reversed_); + if (shift_value != 0) { + __ mov(int32, Operand(int32, LSR, shift_value), SetCC); + } else { + // SHR is special because it is required to produce a positive answer. + __ cmp(int32, Operand(0, RelocInfo::NONE)); + } + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, &result_not_a_smi); + } else { + // Non VFP code cannot convert from unsigned to double, so fall back + // to GenericBinaryOpStub. + __ b(mi, entry_label()); + } + break; + case Token::SHL: + ASSERT(!reversed_); + if (shift_value != 0) { + __ mov(int32, Operand(int32, LSL, shift_value)); + } + break; + default: UNREACHABLE(); + } + // Check that the *signed* result fits in a smi. Not necessary for AND, SAR + // if the shift if more than 0 or SHR if the shit is more than 1. + if (!( (op_ == Token::AND && value_ >= 0) || + ((op_ == Token::SAR) && (shift_value > 0)) || + ((op_ == Token::SHR) && (shift_value > 1)))) { + __ add(r3, int32, Operand(0x40000000), SetCC); + __ b(mi, &result_not_a_smi); + } + __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize)); + Exit(); + + if (result_not_a_smi.is_linked()) { + __ bind(&result_not_a_smi); + if (overwrite_mode_ != OVERWRITE_LEFT) { + ASSERT((overwrite_mode_ == NO_OVERWRITE) || + (overwrite_mode_ == OVERWRITE_RIGHT)); + // If the allocation fails, fall back to the GenericBinaryOpStub. + __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label()); + // Nothing can go wrong now, so overwrite tos. + __ mov(tos_register_, Operand(r4)); + } + + // int32: answer as signed 32bits integer. + // tos_register_: Heap number to write the answer into. + WriteNonSmiAnswer(int32, tos_register_, r3); + + Exit(); + } +} + + +void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() { + // The input from a bitwise operation were Smis but the result cannot fit + // into a Smi, so we store it into a heap number. VirtualFrame::scratch0() + // holds the untagged result to be converted. tos_register_ contains the + // input. See the calls to JumpToAnswerOutOfRange to see how we got here. + ASSERT(Token::IsBitOp(op_)); + ASSERT(!reversed_); + + Register untagged_result = VirtualFrame::scratch0(); + + if (FLAG_debug_code) { + __ Abort("Should not fall through!"); + } + + __ bind(&answer_out_of_range_); + if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) { + // >>> 0 is a special case where the untagged_result register is not set up + // yet. We untag the input to get it. + __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize)); + } + + // This routine uses the registers from r2 to r6. At the moment they are + // not used by the register allocator, but when they are it should use + // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above. + + // Allocate the result heap number. + Register heap_number_map = VirtualFrame::scratch1(); + Register heap_number = r4; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + // If the allocation fails, fall back to the GenericBinaryOpStub. 
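The "add r3, int32, 0x40000000; b mi" idiom above is a one-instruction range check: with a 31-bit smi payload a value fits exactly when it lies in [-2^30, 2^30), and adding 2^30 maps that interval onto [0, 2^31), so the sign bit of the sum is set precisely for values that do not fit. The same predicate written out in C++ (unsigned arithmetic mirrors the flag-setting add):

#include <cassert>
#include <cstdint>

// True when value can be tagged as a 31-bit smi (value << 1 without overflow).
static inline bool FitsInSmi(int32_t value) {
  // Sign bit of (value + 2^30) computed with wrapping arithmetic, which is
  // exactly what the generated "add ..., SetCC; b mi" sequence tests.
  return ((static_cast<uint32_t>(value) + 0x40000000u) & 0x80000000u) == 0;
}

int main() {
  assert(FitsInSmi(0));
  assert(FitsInSmi((1 << 30) - 1));     //  2^30 - 1: largest smi
  assert(FitsInSmi(-(1 << 30)));        // -2^30: smallest smi
  assert(!FitsInSmi(1 << 30));          //  2^30: one too large
  assert(!FitsInSmi(-(1 << 30) - 1));   // one too small
  return 0;
}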
+ __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label()); + WriteNonSmiAnswer(untagged_result, heap_number, r3); + __ mov(tos_register_, Operand(heap_number)); + + Exit(); +} + + +static bool PopCountLessThanEqual2(unsigned int x) { + x &= x - 1; + return (x & (x - 1)) == 0; +} + + +// Returns the index of the lowest bit set. +static int BitPosition(unsigned x) { + int bit_posn = 0; + while ((x & 0xf) == 0) { + bit_posn += 4; + x >>= 4; + } + while ((x & 1) == 0) { + bit_posn++; + x >>= 1; + } + return bit_posn; +} + + +// Can we multiply by x with max two shifts and an add. +// This answers yes to all integers from 2 to 10. +static bool IsEasyToMultiplyBy(int x) { + if (x < 2) return false; // Avoid special cases. + if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. + if (IsPowerOf2(x)) return true; // Simple shift. + if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. + if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. + return false; +} + + +// Can multiply by anything that IsEasyToMultiplyBy returns true for. +// Source and destination may be the same register. This routine does +// not set carry and overflow the way a mul instruction would. +static void InlineMultiplyByKnownInt(MacroAssembler* masm, + Register source, + Register destination, + int known_int) { + if (IsPowerOf2(known_int)) { + masm->mov(destination, Operand(source, LSL, BitPosition(known_int))); + } else if (PopCountLessThanEqual2(known_int)) { + int first_bit = BitPosition(known_int); + int second_bit = BitPosition(known_int ^ (1 << first_bit)); + masm->add(destination, source, + Operand(source, LSL, second_bit - first_bit)); + if (first_bit != 0) { + masm->mov(destination, Operand(destination, LSL, first_bit)); + } + } else { + ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. + int the_bit = BitPosition(known_int + 1); + masm->rsb(destination, source, Operand(source, LSL, the_bit)); + } +} + + +void CodeGenerator::SmiOperation(Token::Value op, + Handle<Object> value, + bool reversed, + OverwriteMode mode) { + int int_value = Smi::cast(*value)->value(); + + bool both_sides_are_smi = frame_->KnownSmiAt(0); + + bool something_to_inline; + switch (op) { + case Token::ADD: + case Token::SUB: + case Token::BIT_AND: + case Token::BIT_OR: + case Token::BIT_XOR: { + something_to_inline = true; + break; + } + case Token::SHL: { + something_to_inline = (both_sides_are_smi || !reversed); + break; + } + case Token::SHR: + case Token::SAR: { + if (reversed) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + case Token::MOD: { + if (reversed || int_value < 2 || !IsPowerOf2(int_value)) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + case Token::MUL: { + if (!IsEasyToMultiplyBy(int_value)) { + something_to_inline = false; + } else { + something_to_inline = true; + } + break; + } + default: { + something_to_inline = false; + break; + } + } + + if (!something_to_inline) { + if (!reversed) { + // Push the rhs onto the virtual frame by putting it in a TOS register. + Register rhs = frame_->GetTOSRegister(); + __ mov(rhs, Operand(value)); + frame_->EmitPush(rhs, TypeInfo::Smi()); + GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value); + } else { + // Pop the rhs, then push lhs and rhs in the right order. Only performs + // at most one pop, the rest takes place in TOS registers. + Register lhs = frame_->GetTOSRegister(); // Get reg for pushing. 
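InlineMultiplyByKnownInt above handles three shapes of constant: a power of two (one shift), at most two set bits (shift-and-add, then an optional shift), and one less than a power of two (reverse-subtract of a shifted copy). A standalone sketch of the same decomposition that computes the product directly instead of emitting ARM instructions:

#include <cassert>
#include <cstdint>

static bool IsPowerOf2(unsigned x) { return x != 0 && (x & (x - 1)) == 0; }

static int BitPosition(unsigned x) {          // index of the lowest set bit
  int n = 0;
  while ((x & 1) == 0) { n++; x >>= 1; }
  return n;
}

// Multiply by a constant of the shapes handled inline: 2^k, two set bits,
// or (2^k - 1); mirrors the shift/add/rsb selection in the code above.
static int32_t MultiplyByKnownInt(int32_t source, unsigned known_int) {
  if (IsPowerOf2(known_int)) {
    return source << BitPosition(known_int);                   // single shift
  }
  unsigned stripped = known_int & (known_int - 1);
  if ((stripped & (stripped - 1)) == 0) {                      // <= 2 bits set
    int first = BitPosition(known_int);
    int second = BitPosition(known_int ^ (1u << first));
    return (source + (source << (second - first))) << first;   // add, then shift
  }
  assert(IsPowerOf2(known_int + 1));                           // e.g. 3, 7, 15
  return (source << BitPosition(known_int + 1)) - source;      // the rsb pattern
}

int main() {
  assert(MultiplyByKnownInt(13, 8)  == 13 * 8);    // power of two
  assert(MultiplyByKnownInt(13, 10) == 13 * 10);   // bits 1 and 3 set
  assert(MultiplyByKnownInt(13, 7)  == 13 * 7);    // 2^3 - 1
  return 0;
}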
+ Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this. + __ mov(lhs, Operand(value)); + frame_->EmitPush(lhs, TypeInfo::Smi()); + TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown(); + frame_->EmitPush(rhs, t); + GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, + GenericBinaryOpStub::kUnknownIntValue); + } + return; + } + + // We move the top of stack to a register (normally no move is invoved). + Register tos = frame_->PopToRegister(); + switch (op) { + case Token::ADD: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + + __ add(tos, tos, Operand(value), SetCC); + deferred->Branch(vs); + if (!both_sides_are_smi) { + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + } + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + case Token::SUB: { + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + + if (reversed) { + __ rsb(tos, tos, Operand(value), SetCC); + } else { + __ sub(tos, tos, Operand(value), SetCC); + } + deferred->Branch(vs); + if (!both_sides_are_smi) { + __ tst(tos, Operand(kSmiTagMask)); + deferred->Branch(ne); + } + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: { + if (both_sides_are_smi) { + switch (op) { + case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; + case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; + case Token::BIT_AND: __ And(tos, tos, Operand(value)); break; + default: UNREACHABLE(); + } + frame_->EmitPush(tos, TypeInfo::Smi()); + } else { + DeferredInlineSmiOperation* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + __ tst(tos, Operand(kSmiTagMask)); + deferred->JumpToNonSmiInput(ne); + switch (op) { + case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; + case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; + case Token::BIT_AND: __ And(tos, tos, Operand(value)); break; + default: UNREACHABLE(); + } + deferred->BindExit(); + TypeInfo result_type = TypeInfo::Integer32(); + if (op == Token::BIT_AND && int_value >= 0) { + result_type = TypeInfo::Smi(); + } + frame_->EmitPush(tos, result_type); + } + break; + } + + case Token::SHL: + if (reversed) { + ASSERT(both_sides_are_smi); + int max_shift = 0; + int max_result = int_value == 0 ? 1 : int_value; + while (Smi::IsValid(max_result << 1)) { + max_shift++; + max_result <<= 1; + } + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, true, mode, tos); + // Mask off the last 5 bits of the shift operand (rhs). This is part + // of the definition of shift in JS and we know we have a Smi so we + // can safely do this. The masked version gets passed to the + // deferred code, but that makes no difference. + __ and_(tos, tos, Operand(Smi::FromInt(0x1f))); + __ cmp(tos, Operand(Smi::FromInt(max_shift))); + deferred->Branch(ge); + Register scratch = VirtualFrame::scratch0(); + __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag. + __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant. + __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant. + deferred->BindExit(); + TypeInfo result = TypeInfo::Integer32(); + frame_->EmitPush(tos, result); + break; + } + // Fall through! 
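For the reversed shift case above (constant << variable), max_shift is precomputed as the largest shift that keeps the constant inside the smi range, and any requested shift of max_shift or more is sent to the deferred path. A sketch of that precomputation, assuming Smi::kMaxValue == 2^30 - 1 as on 32-bit targets:

#include <cassert>
#include <cstdint>

static const int32_t kSmiMaxValue = (1 << 30) - 1;   // 32-bit smi upper bound

static bool SmiIsValid(int64_t v) {
  return v >= -(static_cast<int64_t>(1) << 30) && v <= kSmiMaxValue;
}

// Largest shift s for which (int_value << s) is still a valid smi; the
// generated code bails to the deferred path for shift amounts >= this.
static int MaxInlineShift(int int_value) {
  int max_shift = 0;
  int64_t max_result = int_value == 0 ? 1 : int_value;
  while (SmiIsValid(max_result << 1)) {
    max_shift++;
    max_result <<= 1;
  }
  return max_shift;
}

int main() {
  assert(MaxInlineShift(1) == 29);   // 1 << 29 is still a smi, 1 << 30 is not
  assert(MaxInlineShift(3) == 28);   // 3 << 28 fits, 3 << 29 does not
  assert(MaxInlineShift(0) == 29);   // zero is treated like one
  return 0;
}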
+ case Token::SHR: + case Token::SAR: { + ASSERT(!reversed); + int shift_value = int_value & 0x1f; + TypeInfo result = TypeInfo::Number(); + + if (op == Token::SHR) { + if (shift_value > 1) { + result = TypeInfo::Smi(); + } else if (shift_value > 0) { + result = TypeInfo::Integer32(); + } + } else if (op == Token::SAR) { + if (shift_value > 0) { + result = TypeInfo::Smi(); + } else { + result = TypeInfo::Integer32(); + } + } else { + ASSERT(op == Token::SHL); + result = TypeInfo::Integer32(); + } + + DeferredInlineSmiOperation* deferred = + new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); + if (!both_sides_are_smi) { + __ tst(tos, Operand(kSmiTagMask)); + deferred->JumpToNonSmiInput(ne); + } + switch (op) { + case Token::SHL: { + if (shift_value != 0) { + Register untagged_result = VirtualFrame::scratch0(); + Register scratch = VirtualFrame::scratch1(); + int adjusted_shift = shift_value - kSmiTagSize; + ASSERT(adjusted_shift >= 0); + + if (adjusted_shift != 0) { + __ mov(untagged_result, Operand(tos, LSL, adjusted_shift)); + } else { + __ mov(untagged_result, Operand(tos)); + } + // Check that the *signed* result fits in a smi. + __ add(scratch, untagged_result, Operand(0x40000000), SetCC); + deferred->JumpToAnswerOutOfRange(mi); + __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize)); + } + break; + } + case Token::SHR: { + if (shift_value != 0) { + Register untagged_result = VirtualFrame::scratch0(); + // Remove tag. + __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize)); + __ mov(untagged_result, Operand(untagged_result, LSR, shift_value)); + if (shift_value == 1) { + // Check that the *unsigned* result fits in a smi. + // Neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging + // - 0x40000000: this number would convert to negative when Smi + // tagging. + // These two cases can only happen with shifts by 0 or 1 when + // handed a valid smi. + __ tst(untagged_result, Operand(0xc0000000)); + deferred->JumpToAnswerOutOfRange(ne); + } + __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize)); + } else { + __ cmp(tos, Operand(0, RelocInfo::NONE)); + deferred->JumpToAnswerOutOfRange(mi); + } + break; + } + case Token::SAR: { + if (shift_value != 0) { + // Do the shift and the tag removal in one operation. If the shift + // is 31 bits (the highest possible value) then we emit the + // instruction as a shift by 0 which in the ARM ISA means shift + // arithmetically by 32. + __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f)); + __ mov(tos, Operand(tos, LSL, kSmiTagSize)); + } + break; + } + default: UNREACHABLE(); + } + deferred->BindExit(); + frame_->EmitPush(tos, result); + break; + } + + case Token::MOD: { + ASSERT(!reversed); + ASSERT(int_value >= 2); + ASSERT(IsPowerOf2(int_value)); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + unsigned mask = (0x80000000u | kSmiTagMask); + __ tst(tos, Operand(mask)); + deferred->Branch(ne); // Go to deferred code on non-Smis and negative. + mask = (int_value << kSmiTagSize) - 1; + __ and_(tos, tos, Operand(mask)); + deferred->BindExit(); + // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer. + frame_->EmitPush( + tos, + both_sides_are_smi ? 
TypeInfo::Smi() : TypeInfo::Number()); + break; + } + + case Token::MUL: { + ASSERT(IsEasyToMultiplyBy(int_value)); + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); + unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; + max_smi_that_wont_overflow <<= kSmiTagSize; + unsigned mask = 0x80000000u; + while ((mask & max_smi_that_wont_overflow) == 0) { + mask |= mask >> 1; + } + mask |= kSmiTagMask; + // This does a single mask that checks for a too high value in a + // conservative way and for a non-Smi. It also filters out negative + // numbers, unfortunately, but since this code is inline we prefer + // brevity to comprehensiveness. + __ tst(tos, Operand(mask)); + deferred->Branch(ne); + InlineMultiplyByKnownInt(masm_, tos, tos, int_value); + deferred->BindExit(); + frame_->EmitPush(tos); + break; + } + + default: + UNREACHABLE(); + break; + } +} + + +void CodeGenerator::Comparison(Condition cond, + Expression* left, + Expression* right, + bool strict) { + VirtualFrame::RegisterAllocationScope scope(this); + + if (left != NULL) Load(left); + if (right != NULL) Load(right); + + // sp[0] : y + // sp[1] : x + // result : cc register + + // Strict only makes sense for equality comparisons. + ASSERT(!strict || cond == eq); + + Register lhs; + Register rhs; + + bool lhs_is_smi; + bool rhs_is_smi; + + // We load the top two stack positions into registers chosen by the virtual + // frame. This should keep the register shuffling to a minimum. + // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. + if (cond == gt || cond == le) { + cond = ReverseCondition(cond); + lhs_is_smi = frame_->KnownSmiAt(0); + rhs_is_smi = frame_->KnownSmiAt(1); + lhs = frame_->PopToRegister(); + rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again! + } else { + rhs_is_smi = frame_->KnownSmiAt(0); + lhs_is_smi = frame_->KnownSmiAt(1); + rhs = frame_->PopToRegister(); + lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again! + } + + bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi); + + ASSERT(rhs.is(r0) || rhs.is(r1)); + ASSERT(lhs.is(r0) || lhs.is(r1)); + + JumpTarget exit; + + if (!both_sides_are_smi) { + // Now we have the two sides in r0 and r1. We flush any other registers + // because the stub doesn't know about register allocation. + frame_->SpillAll(); + Register scratch = VirtualFrame::scratch0(); + Register smi_test_reg; + if (lhs_is_smi) { + smi_test_reg = rhs; + } else if (rhs_is_smi) { + smi_test_reg = lhs; + } else { + __ orr(scratch, lhs, Operand(rhs)); + smi_test_reg = scratch; + } + __ tst(smi_test_reg, Operand(kSmiTagMask)); + JumpTarget smi; + smi.Branch(eq); + + // Perform non-smi comparison by stub. + // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. + // We call with 0 args because there are 0 on the stack. + CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs); + frame_->CallStub(&stub, 0); + __ cmp(r0, Operand(0, RelocInfo::NONE)); + exit.Jump(); + + smi.Bind(); + } + + // Do smi comparisons by pointer comparison. + __ cmp(lhs, Operand(rhs)); + + exit.Bind(); + cc_reg_ = cond; +} + + +// Call the function on the stack with the given arguments. +void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, + CallFunctionFlags flags, + int position) { + // Push the arguments ("left-to-right") on the stack. 
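The Token::MOD case above leans on the smi encoding: for a non-negative x and a power-of-two divisor 2^k, x % 2^k is just the low k bits of x, and since the tag bit of a smi is zero, masking the tagged word with ((2^k << 1) - 1) yields the correctly tagged remainder in a single instruction. The mask arithmetic, assuming the 32-bit encoding value << 1:

#include <cassert>
#include <cstdint>

static const int kSmiTagSize = 1;

static inline uint32_t SmiFromInt(uint32_t v) { return v << kSmiTagSize; }

// Tagged remainder of a non-negative smi modulo a power-of-two constant.
static inline uint32_t TaggedModPowerOf2(uint32_t tagged_x, uint32_t divisor) {
  uint32_t mask = (divisor << kSmiTagSize) - 1;  // low payload bits plus the tag
  return tagged_x & mask;
}

int main() {
  assert(TaggedModPowerOf2(SmiFromInt(37), 8) == SmiFromInt(37 % 8));
  assert(TaggedModPowerOf2(SmiFromInt(64), 8) == SmiFromInt(0));
  return 0;
}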
+ int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + // Record the position for debugging purposes. + CodeForSourcePosition(position); + + // Use the shared code stub to call the function. + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + CallFunctionStub call_function(arg_count, in_loop, flags); + frame_->CallStub(&call_function, arg_count + 1); + + // Restore context and pop function from the stack. + __ ldr(cp, frame_->Context()); + frame_->Drop(); // discard the TOS +} + + +void CodeGenerator::CallApplyLazy(Expression* applicand, + Expression* receiver, + VariableProxy* arguments, + int position) { + // An optimized implementation of expressions of the form + // x.apply(y, arguments). + // If the arguments object of the scope has not been allocated, + // and x.apply is Function.prototype.apply, this optimization + // just copies y and the arguments of the current function on the + // stack, as receiver and arguments, and calls x. + // In the implementation comments, we call x the applicand + // and y the receiver. + + ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION); + ASSERT(arguments->IsArguments()); + + // Load applicand.apply onto the stack. This will usually + // give us a megamorphic load site. Not super, but it works. + Load(applicand); + Handle<String> name = Factory::LookupAsciiSymbol("apply"); + frame_->Dup(); + frame_->CallLoadIC(name, RelocInfo::CODE_TARGET); + frame_->EmitPush(r0); + + // Load the receiver and the existing arguments object onto the + // expression stack. Avoid allocating the arguments object here. + Load(receiver); + LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF); + + // At this point the top two stack elements are probably in registers + // since they were just loaded. Ensure they are in regs and get the + // regs. + Register receiver_reg = frame_->Peek2(); + Register arguments_reg = frame_->Peek(); + + // From now on the frame is spilled. + frame_->SpillAll(); + + // Emit the source position information after having loaded the + // receiver and the arguments. + CodeForSourcePosition(position); + // Contents of the stack at this point: + // sp[0]: arguments object of the current function or the hole. + // sp[1]: receiver + // sp[2]: applicand.apply + // sp[3]: applicand. + + // Check if the arguments object has been lazily allocated + // already. If so, just use that instead of copying the arguments + // from the stack. This also deals with cases where a local variable + // named 'arguments' has been introduced. + JumpTarget slow; + Label done; + __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex); + __ cmp(ip, arguments_reg); + slow.Branch(ne); + + Label build_args; + // Get rid of the arguments object probe. + frame_->Drop(); + // Stack now has 3 elements on it. + // Contents of stack at this point: + // sp[0]: receiver - in the receiver_reg register. + // sp[1]: applicand.apply + // sp[2]: applicand. + + // Check that the receiver really is a JavaScript object. + __ JumpIfSmi(receiver_reg, &build_args); + // We allow all JSObjects including JSFunctions. As long as + // JS_FUNCTION_TYPE is the last instance type and it is right + // after LAST_JS_OBJECT_TYPE, we do not have to check the upper + // bound. + STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, &build_args); + + // Check that applicand.apply is Function.prototype.apply. 
+ __ ldr(r0, MemOperand(sp, kPointerSize)); + __ JumpIfSmi(r0, &build_args); + __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE); + __ b(ne, &build_args); + Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply)); + __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); + __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ cmp(r1, Operand(apply_code)); + __ b(ne, &build_args); + + // Check that applicand is a function. + __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); + __ JumpIfSmi(r1, &build_args); + __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE); + __ b(ne, &build_args); + + // Copy the arguments to this function possibly from the + // adaptor frame below it. + Label invoke, adapted; + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(eq, &adapted); + + // No arguments adaptor frame. Copy fixed number of arguments. + __ mov(r0, Operand(scope()->num_parameters())); + for (int i = 0; i < scope()->num_parameters(); i++) { + __ ldr(r2, frame_->ParameterAt(i)); + __ push(r2); + } + __ jmp(&invoke); + + // Arguments adaptor frame present. Copy arguments from there, but + // avoid copying too many arguments to avoid stack overflows. + __ bind(&adapted); + static const uint32_t kArgumentsLimit = 1 * KB; + __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(r0, Operand(r0, LSR, kSmiTagSize)); + __ mov(r3, r0); + __ cmp(r0, Operand(kArgumentsLimit)); + __ b(gt, &build_args); + + // Loop through the arguments pushing them onto the execution + // stack. We don't inform the virtual frame of the push, so we don't + // have to worry about getting rid of the elements from the virtual + // frame. + Label loop; + // r3 is a small non-negative integer, due to the test above. + __ cmp(r3, Operand(0, RelocInfo::NONE)); + __ b(eq, &invoke); + // Compute the address of the first argument. + __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2)); + __ add(r2, r2, Operand(kPointerSize)); + __ bind(&loop); + // Post-decrement argument address by kPointerSize on each iteration. + __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex)); + __ push(r4); + __ sub(r3, r3, Operand(1), SetCC); + __ b(gt, &loop); + + // Invoke the function. + __ bind(&invoke); + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION); + // Drop applicand.apply and applicand from the stack, and push + // the result of the function call, but leave the spilled frame + // unchanged, with 3 elements, so it is correct when we compile the + // slow-case code. + __ add(sp, sp, Operand(2 * kPointerSize)); + __ push(r0); + // Stack now has 1 element: + // sp[0]: result + __ jmp(&done); + + // Slow-case: Allocate the arguments object since we know it isn't + // there, and fall-through to the slow-case where we call + // applicand.apply. + __ bind(&build_args); + // Stack now has 3 elements, because we have jumped from where: + // sp[0]: receiver + // sp[1]: applicand.apply + // sp[2]: applicand. + StoreArgumentsObject(false); + + // Stack and frame now have 4 elements. + slow.Bind(); + + // Generic computation of x.apply(y, args) with no special optimization. + // Flip applicand.apply and applicand on the stack, so + // applicand looks like the receiver of the applicand.apply call. + // Then process it as a normal function call. 
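The adaptor-frame path above walks the caller's argument area with a post-decremented pointer and pushes each word, and it refuses to inline more than kArgumentsLimit arguments so a very large apply call cannot overflow the stack during the copy. A simplified sketch of that loop shape over plain arrays (the 1024 bound mirrors 1 * KB in the code above; the exact frame layout and argument ordering are glossed over):

#include <cassert>
#include <cstdint>
#include <vector>

static const uint32_t kArgumentsLimit = 1024;   // matches 1 * KB above

// Copy 'count' words ending at 'last' (walking towards lower addresses) onto
// a growing stack; returns false when the slow path must be taken instead.
static bool CopyArguments(const uint32_t* last, uint32_t count,
                          std::vector<uint32_t>* stack) {
  if (count > kArgumentsLimit) return false;    // like "b gt, &build_args"
  const uint32_t* p = last;
  for (uint32_t i = 0; i < count; i++) {
    stack->push_back(*p);                       // ldr r4, [r2], #-4 ; push r4
    p--;                                        // post-decrement by one word
  }
  return true;
}

int main() {
  uint32_t args[] = {10, 20, 30};
  std::vector<uint32_t> stack;
  assert(CopyArguments(&args[2], 3, &stack));   // start at the last argument
  assert(stack.size() == 3 && stack[0] == 30 && stack[2] == 10);
  return 0;
}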
+ __ ldr(r0, MemOperand(sp, 3 * kPointerSize)); + __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); + __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize)); + + CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS); + frame_->CallStub(&call_function, 3); + // The function and its two arguments have been dropped. + frame_->Drop(); // Drop the receiver as well. + frame_->EmitPush(r0); + frame_->SpillAll(); // A spilled frame is also jumping to label done. + // Stack now has 1 element: + // sp[0]: result + __ bind(&done); + + // Restore the context register after a call. + __ ldr(cp, frame_->Context()); +} + + +void CodeGenerator::Branch(bool if_true, JumpTarget* target) { + ASSERT(has_cc()); + Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_); + target->Branch(cond); + cc_reg_ = al; +} + + +void CodeGenerator::CheckStack() { + frame_->SpillAll(); + Comment cmnt(masm_, "[ check stack"); + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + masm_->cmp(sp, Operand(ip)); + StackCheckStub stub; + // Call the stub if lower. + masm_->mov(ip, + Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()), + RelocInfo::CODE_TARGET), + LeaveCC, + lo); + masm_->Call(ip, lo); +} + + +void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + for (int i = 0; frame_ != NULL && i < statements->length(); i++) { + Visit(statements->at(i)); + } + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitBlock(Block* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Block"); + CodeForStatementPosition(node); + node->break_target()->SetExpectedHeight(); + VisitStatements(node->statements()); + if (node->break_target()->is_linked()) { + node->break_target()->Bind(); + } + node->break_target()->Unuse(); + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { + frame_->EmitPush(cp); + frame_->EmitPush(Operand(pairs)); + frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0))); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + + frame_->CallRuntime(Runtime::kDeclareGlobals, 4); + // The result is discarded. +} + + +void CodeGenerator::VisitDeclaration(Declaration* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Declaration"); + Variable* var = node->proxy()->var(); + ASSERT(var != NULL); // must have been resolved + Slot* slot = var->AsSlot(); + + // If it was not possible to allocate the variable at compile time, + // we need to "declare" it at runtime to make sure it actually + // exists in the local context. + if (slot != NULL && slot->type() == Slot::LOOKUP) { + // Variables with a "LOOKUP" slot were introduced as non-locals + // during variable resolution and must have mode DYNAMIC. + ASSERT(var->is_dynamic()); + // For now, just do a runtime call. + frame_->EmitPush(cp); + frame_->EmitPush(Operand(var->name())); + // Declaration nodes are always declared in only two modes. + ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST); + PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY; + frame_->EmitPush(Operand(Smi::FromInt(attr))); + // Push initial value, if any. + // Note: For variables we must not push an initial value (such as + // 'undefined') because we may have a (legal) redeclaration and we + // must not destroy the current value. 
+ if (node->mode() == Variable::CONST) { + frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex); + } else if (node->fun() != NULL) { + Load(node->fun()); + } else { + frame_->EmitPush(Operand(0, RelocInfo::NONE)); + } + + frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); + // Ignore the return value (declarations are statements). + + ASSERT(frame_->height() == original_height); + return; + } + + ASSERT(!var->is_global()); + + // If we have a function or a constant, we need to initialize the variable. + Expression* val = NULL; + if (node->mode() == Variable::CONST) { + val = new Literal(Factory::the_hole_value()); + } else { + val = node->fun(); // NULL if we don't have a function + } + + + if (val != NULL) { + WriteBarrierCharacter wb_info = + val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI; + if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE; + // Set initial value. + Reference target(this, node->proxy()); + Load(val); + target.SetValue(NOT_CONST_INIT, wb_info); + + // Get rid of the assigned value (declarations are statements). + frame_->Drop(); + } + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ ExpressionStatement"); + CodeForStatementPosition(node); + Expression* expression = node->expression(); + expression->MarkAsStatement(); + Load(expression); + frame_->Drop(); + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "// EmptyStatement"); + CodeForStatementPosition(node); + // nothing to do + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::VisitIfStatement(IfStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ IfStatement"); + // Generate different code depending on which parts of the if statement + // are present or not. 
+ bool has_then_stm = node->HasThenStatement(); + bool has_else_stm = node->HasElseStatement(); + + CodeForStatementPosition(node); + + JumpTarget exit; + if (has_then_stm && has_else_stm) { + Comment cmnt(masm_, "[ IfThenElse"); + JumpTarget then; + JumpTarget else_; + // if (cond) + LoadCondition(node->condition(), &then, &else_, true); + if (frame_ != NULL) { + Branch(false, &else_); + } + // then + if (frame_ != NULL || then.is_linked()) { + then.Bind(); + Visit(node->then_statement()); + } + if (frame_ != NULL) { + exit.Jump(); + } + // else + if (else_.is_linked()) { + else_.Bind(); + Visit(node->else_statement()); + } + + } else if (has_then_stm) { + Comment cmnt(masm_, "[ IfThen"); + ASSERT(!has_else_stm); + JumpTarget then; + // if (cond) + LoadCondition(node->condition(), &then, &exit, true); + if (frame_ != NULL) { + Branch(false, &exit); + } + // then + if (frame_ != NULL || then.is_linked()) { + then.Bind(); + Visit(node->then_statement()); + } + + } else if (has_else_stm) { + Comment cmnt(masm_, "[ IfElse"); + ASSERT(!has_then_stm); + JumpTarget else_; + // if (!cond) + LoadCondition(node->condition(), &exit, &else_, true); + if (frame_ != NULL) { + Branch(true, &exit); + } + // else + if (frame_ != NULL || else_.is_linked()) { + else_.Bind(); + Visit(node->else_statement()); + } + + } else { + Comment cmnt(masm_, "[ If"); + ASSERT(!has_then_stm && !has_else_stm); + // if (cond) + LoadCondition(node->condition(), &exit, &exit, false); + if (frame_ != NULL) { + if (has_cc()) { + cc_reg_ = al; + } else { + frame_->Drop(); + } + } + } + + // end + if (exit.is_linked()) { + exit.Bind(); + } + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { + Comment cmnt(masm_, "[ ContinueStatement"); + CodeForStatementPosition(node); + node->target()->continue_target()->Jump(); +} + + +void CodeGenerator::VisitBreakStatement(BreakStatement* node) { + Comment cmnt(masm_, "[ BreakStatement"); + CodeForStatementPosition(node); + node->target()->break_target()->Jump(); +} + + +void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { + Comment cmnt(masm_, "[ ReturnStatement"); + + CodeForStatementPosition(node); + Load(node->expression()); + frame_->PopToR0(); + frame_->PrepareForReturn(); + if (function_return_is_shadowed_) { + function_return_.Jump(); + } else { + // Pop the result from the frame and prepare the frame for + // returning thus making it easier to merge. + if (function_return_.is_bound()) { + // If the function return label is already bound we reuse the + // code by jumping to the return site. + function_return_.Jump(); + } else { + function_return_.Bind(); + GenerateReturnSequence(); + } + } +} + + +void CodeGenerator::GenerateReturnSequence() { + if (FLAG_trace) { + // Push the return value on the stack as the parameter. + // Runtime::TraceExit returns the parameter as it is. + frame_->EmitPush(r0); + frame_->CallRuntime(Runtime::kTraceExit, 1); + } + +#ifdef DEBUG + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); +#endif + // Make sure that the constant pool is not emitted inside of the return + // sequence. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + // Tear down the frame which will restore the caller's frame pointer and + // the link register. 
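The return sequence here pops every declared parameter plus one extra slot for the receiver in a single add to sp (the sp_delta computed just below), while BlockConstPoolScope above and the size assertion keep the whole exit sequence at a fixed length so the debugger can later patch it in place. A tiny worked example of the sp adjustment, assuming 4-byte pointers as on ARM:

#include <cassert>

static const int kPointerSize = 4;   // 32-bit ARM

// Bytes popped on return: every declared parameter plus the receiver slot.
static int ReturnSpDelta(int num_parameters) {
  return (num_parameters + 1) * kPointerSize;
}

int main() {
  assert(ReturnSpDelta(0) == 4);    // just the receiver
  assert(ReturnSpDelta(2) == 12);   // two parameters plus the receiver
  return 0;
}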
+ frame_->Exit(); + + // Here we use masm_-> instead of the __ macro to avoid the code coverage + // tool from instrumenting as we rely on the code size here. + int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize; + masm_->add(sp, sp, Operand(sp_delta)); + masm_->Jump(lr); + DeleteFrame(); + +#ifdef DEBUG + // Check that the size of the code used for returning is large enough + // for the debugger's requirements. + ASSERT(Assembler::kJSReturnSequenceInstructions <= + masm_->InstructionsGeneratedSince(&check_exit_codesize)); +#endif + } +} + + +void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ WithEnterStatement"); + CodeForStatementPosition(node); + Load(node->expression()); + if (node->is_catch_block()) { + frame_->CallRuntime(Runtime::kPushCatchContext, 1); + } else { + frame_->CallRuntime(Runtime::kPushContext, 1); + } +#ifdef DEBUG + JumpTarget verified_true; + __ cmp(r0, cp); + verified_true.Branch(eq); + __ stop("PushContext: r0 is expected to be the same as cp"); + verified_true.Bind(); +#endif + // Update context local. + __ str(cp, frame_->Context()); + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ WithExitStatement"); + CodeForStatementPosition(node); + // Pop context. + __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX)); + // Update context local. + __ str(cp, frame_->Context()); + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ SwitchStatement"); + CodeForStatementPosition(node); + node->break_target()->SetExpectedHeight(); + + Load(node->tag()); + + JumpTarget next_test; + JumpTarget fall_through; + JumpTarget default_entry; + JumpTarget default_exit(JumpTarget::BIDIRECTIONAL); + ZoneList<CaseClause*>* cases = node->cases(); + int length = cases->length(); + CaseClause* default_clause = NULL; + + for (int i = 0; i < length; i++) { + CaseClause* clause = cases->at(i); + if (clause->is_default()) { + // Remember the default clause and compile it at the end. + default_clause = clause; + continue; + } + + Comment cmnt(masm_, "[ Case clause"); + // Compile the test. + next_test.Bind(); + next_test.Unuse(); + // Duplicate TOS. + frame_->Dup(); + Comparison(eq, NULL, clause->label(), true); + Branch(false, &next_test); + + // Before entering the body from the test, remove the switch value from + // the stack. + frame_->Drop(); + + // Label the body so that fall through is enabled. + if (i > 0 && cases->at(i - 1)->is_default()) { + default_exit.Bind(); + } else { + fall_through.Bind(); + fall_through.Unuse(); + } + VisitStatements(clause->statements()); + + // If control flow can fall through from the body, jump to the next body + // or the end of the statement. + if (frame_ != NULL) { + if (i < length - 1 && cases->at(i + 1)->is_default()) { + default_entry.Jump(); + } else { + fall_through.Jump(); + } + } + } + + // The final "test" removes the switch value. + next_test.Bind(); + frame_->Drop(); + + // If there is a default clause, compile it. 
+ if (default_clause != NULL) { + Comment cmnt(masm_, "[ Default clause"); + default_entry.Bind(); + VisitStatements(default_clause->statements()); + // If control flow can fall out of the default and there is a case after + // it, jump to that case's body. + if (frame_ != NULL && default_exit.is_bound()) { + default_exit.Jump(); + } + } + + if (fall_through.is_linked()) { + fall_through.Bind(); + } + + if (node->break_target()->is_linked()) { + node->break_target()->Bind(); + } + node->break_target()->Unuse(); + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ DoWhileStatement"); + CodeForStatementPosition(node); + node->break_target()->SetExpectedHeight(); + JumpTarget body(JumpTarget::BIDIRECTIONAL); + IncrementLoopNesting(); + + // Label the top of the loop for the backward CFG edge. If the test + // is always true we can use the continue target, and if the test is + // always false there is no need. + ConditionAnalysis info = AnalyzeCondition(node->cond()); + switch (info) { + case ALWAYS_TRUE: + node->continue_target()->SetExpectedHeight(); + node->continue_target()->Bind(); + break; + case ALWAYS_FALSE: + node->continue_target()->SetExpectedHeight(); + break; + case DONT_KNOW: + node->continue_target()->SetExpectedHeight(); + body.Bind(); + break; + } + + CheckStack(); // TODO(1222600): ignore if body contains calls. + Visit(node->body()); + + // Compile the test. + switch (info) { + case ALWAYS_TRUE: + // If control can fall off the end of the body, jump back to the + // top. + if (has_valid_frame()) { + node->continue_target()->Jump(); + } + break; + case ALWAYS_FALSE: + // If we have a continue in the body, we only have to bind its + // jump target. + if (node->continue_target()->is_linked()) { + node->continue_target()->Bind(); + } + break; + case DONT_KNOW: + // We have to compile the test expression if it can be reached by + // control flow falling out of the body or via continue. + if (node->continue_target()->is_linked()) { + node->continue_target()->Bind(); + } + if (has_valid_frame()) { + Comment cmnt(masm_, "[ DoWhileCondition"); + CodeForDoWhileConditionPosition(node); + LoadCondition(node->cond(), &body, node->break_target(), true); + if (has_valid_frame()) { + // A invalid frame here indicates that control did not + // fall out of the test expression. + Branch(true, &body); + } + } + break; + } + + if (node->break_target()->is_linked()) { + node->break_target()->Bind(); + } + DecrementLoopNesting(); + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitWhileStatement(WhileStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ WhileStatement"); + CodeForStatementPosition(node); + + // If the test is never true and has no side effects there is no need + // to compile the test or body. + ConditionAnalysis info = AnalyzeCondition(node->cond()); + if (info == ALWAYS_FALSE) return; + + node->break_target()->SetExpectedHeight(); + IncrementLoopNesting(); + + // Label the top of the loop with the continue target for the backward + // CFG edge. 
+ node->continue_target()->SetExpectedHeight(); + node->continue_target()->Bind(); + + if (info == DONT_KNOW) { + JumpTarget body(JumpTarget::BIDIRECTIONAL); + LoadCondition(node->cond(), &body, node->break_target(), true); + if (has_valid_frame()) { + // A NULL frame indicates that control did not fall out of the + // test expression. + Branch(false, node->break_target()); + } + if (has_valid_frame() || body.is_linked()) { + body.Bind(); + } + } + + if (has_valid_frame()) { + CheckStack(); // TODO(1222600): ignore if body contains calls. + Visit(node->body()); + + // If control flow can fall out of the body, jump back to the top. + if (has_valid_frame()) { + node->continue_target()->Jump(); + } + } + if (node->break_target()->is_linked()) { + node->break_target()->Bind(); + } + DecrementLoopNesting(); + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitForStatement(ForStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ ForStatement"); + CodeForStatementPosition(node); + if (node->init() != NULL) { + Visit(node->init()); + } + + // If the test is never true there is no need to compile the test or + // body. + ConditionAnalysis info = AnalyzeCondition(node->cond()); + if (info == ALWAYS_FALSE) return; + + node->break_target()->SetExpectedHeight(); + IncrementLoopNesting(); + + // We know that the loop index is a smi if it is not modified in the + // loop body and it is checked against a constant limit in the loop + // condition. In this case, we reset the static type information of the + // loop index to smi before compiling the body, the update expression, and + // the bottom check of the loop condition. + TypeInfoCodeGenState type_info_scope(this, + node->is_fast_smi_loop() ? + node->loop_variable()->AsSlot() : + NULL, + TypeInfo::Smi()); + + // If there is no update statement, label the top of the loop with the + // continue target, otherwise with the loop target. + JumpTarget loop(JumpTarget::BIDIRECTIONAL); + if (node->next() == NULL) { + node->continue_target()->SetExpectedHeight(); + node->continue_target()->Bind(); + } else { + node->continue_target()->SetExpectedHeight(); + loop.Bind(); + } + + // If the test is always true, there is no need to compile it. + if (info == DONT_KNOW) { + JumpTarget body; + LoadCondition(node->cond(), &body, node->break_target(), true); + if (has_valid_frame()) { + Branch(false, node->break_target()); + } + if (has_valid_frame() || body.is_linked()) { + body.Bind(); + } + } + + if (has_valid_frame()) { + CheckStack(); // TODO(1222600): ignore if body contains calls. + Visit(node->body()); + + if (node->next() == NULL) { + // If there is no update statement and control flow can fall out + // of the loop, jump directly to the continue label. + if (has_valid_frame()) { + node->continue_target()->Jump(); + } + } else { + // If there is an update statement and control flow can reach it + // via falling out of the body of the loop or continuing, we + // compile the update statement. + if (node->continue_target()->is_linked()) { + node->continue_target()->Bind(); + } + if (has_valid_frame()) { + // Record source position of the statement as this code which is + // after the code for the body actually belongs to the loop + // statement and not the body. 
+ CodeForStatementPosition(node); + Visit(node->next()); + loop.Jump(); + } + } + } + if (node->break_target()->is_linked()) { + node->break_target()->Bind(); + } + DecrementLoopNesting(); + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitForInStatement(ForInStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ ForInStatement"); + CodeForStatementPosition(node); + + JumpTarget primitive; + JumpTarget jsobject; + JumpTarget fixed_array; + JumpTarget entry(JumpTarget::BIDIRECTIONAL); + JumpTarget end_del_check; + JumpTarget exit; + + // Get the object to enumerate over (converted to JSObject). + Load(node->enumerable()); + + VirtualFrame::SpilledScope spilled_scope(frame_); + // Both SpiderMonkey and kjs ignore null and undefined in contrast + // to the specification. 12.6.4 mandates a call to ToObject. + frame_->EmitPop(r0); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r0, ip); + exit.Branch(eq); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(r0, ip); + exit.Branch(eq); + + // Stack layout in body: + // [iteration counter (Smi)] + // [length of array] + // [FixedArray] + // [Map or 0] + // [Object] + + // Check if enumerable is already a JSObject + __ tst(r0, Operand(kSmiTagMask)); + primitive.Branch(eq); + __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); + jsobject.Branch(hs); + + primitive.Bind(); + frame_->EmitPush(r0); + frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1); + + jsobject.Bind(); + // Get the set of properties (as a FixedArray or Map). + // r0: value to be iterated over + frame_->EmitPush(r0); // Push the object being iterated over. + + // Check cache validity in generated code. This is a fast case for + // the JSObject::IsSimpleEnum cache validity checks. If we cannot + // guarantee cache validity, call the runtime system to check cache + // validity or get the property names in a fixed array. + JumpTarget call_runtime; + JumpTarget loop(JumpTarget::BIDIRECTIONAL); + JumpTarget check_prototype; + JumpTarget use_cache; + __ mov(r1, Operand(r0)); + loop.Bind(); + // Check that there are no elements. + __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); + __ cmp(r2, r4); + call_runtime.Branch(ne); + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in r3 for the subsequent + // prototype load. + __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset)); + __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex); + __ cmp(r2, ip); + call_runtime.Branch(eq); + // Check that there in an enum cache in the non-empty instance + // descriptors. This is the case if the next enumeration index + // field does not contain a smi. + __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset)); + __ tst(r2, Operand(kSmiTagMask)); + call_runtime.Branch(eq); + // For all objects but the receiver, check that the cache is empty. + // r4: empty fixed array root. + __ cmp(r1, r0); + check_prototype.Branch(eq); + __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset)); + __ cmp(r2, r4); + call_runtime.Branch(ne); + check_prototype.Bind(); + // Load the prototype from the map and loop if non-null. 
+ __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset)); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(r1, ip); + loop.Branch(ne); + // The enum cache is valid. Load the map of the object being + // iterated over and use the cache for the iteration. + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + use_cache.Jump(); + + call_runtime.Bind(); + // Call the runtime to get the property names for the object. + frame_->EmitPush(r0); // push the object (slot 4) for the runtime call + frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1); + + // If we got a map from the runtime call, we can do a fast + // modification check. Otherwise, we got a fixed array, and we have + // to do a slow check. + // r0: map or fixed array (result from call to + // Runtime::kGetPropertyNamesFast) + __ mov(r2, Operand(r0)); + __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kMetaMapRootIndex); + __ cmp(r1, ip); + fixed_array.Branch(ne); + + use_cache.Bind(); + // Get enum cache + // r0: map (either the result from a call to + // Runtime::kGetPropertyNamesFast or has been fetched directly from + // the object) + __ mov(r1, Operand(r0)); + __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset)); + __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); + __ ldr(r2, + FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); + + frame_->EmitPush(r0); // map + frame_->EmitPush(r2); // enum cache bridge cache + __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset)); + frame_->EmitPush(r0); + __ mov(r0, Operand(Smi::FromInt(0))); + frame_->EmitPush(r0); + entry.Jump(); + + fixed_array.Bind(); + __ mov(r1, Operand(Smi::FromInt(0))); + frame_->EmitPush(r1); // insert 0 in place of Map + frame_->EmitPush(r0); + + // Push the length of the array and the initial index onto the stack. + __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset)); + frame_->EmitPush(r0); + __ mov(r0, Operand(Smi::FromInt(0))); // init index + frame_->EmitPush(r0); + + // Condition. + entry.Bind(); + // sp[0] : index + // sp[1] : array/enum cache length + // sp[2] : array or enum cache + // sp[3] : 0 or map + // sp[4] : enumerable + // Grab the current frame's height for the break and continue + // targets only after all the state is pushed on the frame. + node->break_target()->SetExpectedHeight(); + node->continue_target()->SetExpectedHeight(); + + // Load the current count to r0, load the length to r1. + __ Ldrd(r0, r1, frame_->ElementAt(0)); + __ cmp(r0, r1); // compare to the array length + node->break_target()->Branch(hs); + + // Get the i'th entry of the array. + __ ldr(r2, frame_->ElementAt(2)); + __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + + // Get Map or 0. + __ ldr(r2, frame_->ElementAt(3)); + // Check if this (still) matches the map of the enumerable. + // If not, we have to filter the key. + __ ldr(r1, frame_->ElementAt(4)); + __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ cmp(r1, Operand(r2)); + end_del_check.Branch(eq); + + // Convert the entry to a string (or null if it isn't a property anymore). + __ ldr(r0, frame_->ElementAt(4)); // push enumerable + frame_->EmitPush(r0); + frame_->EmitPush(r3); // push entry + frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2); + __ mov(r3, Operand(r0), SetCC); + // If the property has been removed while iterating, we just skip it. 
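The element load above scales the loop index by kPointerSizeLog2 - kSmiTagSize because the index on the stack is still a tagged smi: with 4-byte pointers and a 1-bit tag, shifting the tagged index (i << 1) left by one more bit gives exactly i * 4, the byte offset of the i'th entry. The arithmetic spelled out:

#include <cassert>
#include <cstdint>

static const int kPointerSizeLog2 = 2;   // 4-byte pointers
static const int kSmiTagSize = 1;        // 32-bit smi encoding

// Byte offset of element i when the index register holds the tagged smi i << 1.
static inline uint32_t ElementOffsetFromSmiIndex(uint32_t tagged_index) {
  return tagged_index << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  for (uint32_t i = 0; i < 8; i++) {
    uint32_t tagged = i << kSmiTagSize;                  // Smi::FromInt(i)
    assert(ElementOffsetFromSmiIndex(tagged) == i * 4);  // i * kPointerSize
  }
  return 0;
}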
+ node->continue_target()->Branch(eq); + + end_del_check.Bind(); + // Store the entry in the 'each' expression and take another spin in the + // loop. r3: i'th entry of the enum cache (or string there of) + frame_->EmitPush(r3); // push entry + { VirtualFrame::RegisterAllocationScope scope(this); + Reference each(this, node->each()); + if (!each.is_illegal()) { + if (each.size() > 0) { + // Loading a reference may leave the frame in an unspilled state. + frame_->SpillAll(); // Sync stack to memory. + // Get the value (under the reference on the stack) from memory. + __ ldr(r0, frame_->ElementAt(each.size())); + frame_->EmitPush(r0); + each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI); + frame_->Drop(2); // The result of the set and the extra pushed value. + } else { + // If the reference was to a slot we rely on the convenient property + // that it doesn't matter whether a value (eg, ebx pushed above) is + // right on top of or right underneath a zero-sized reference. + each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI); + frame_->Drop(1); // Drop the result of the set operation. + } + } + } + // Body. + CheckStack(); // TODO(1222600): ignore if body contains calls. + { VirtualFrame::RegisterAllocationScope scope(this); + Visit(node->body()); + } + + // Next. Reestablish a spilled frame in case we are coming here via + // a continue in the body. + node->continue_target()->Bind(); + frame_->SpillAll(); + frame_->EmitPop(r0); + __ add(r0, r0, Operand(Smi::FromInt(1))); + frame_->EmitPush(r0); + entry.Jump(); + + // Cleanup. No need to spill because VirtualFrame::Drop is safe for + // any frame. + node->break_target()->Bind(); + frame_->Drop(5); + + // Exit. + exit.Bind(); + node->continue_target()->Unuse(); + node->break_target()->Unuse(); + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope(frame_); + Comment cmnt(masm_, "[ TryCatchStatement"); + CodeForStatementPosition(node); + + JumpTarget try_block; + JumpTarget exit; + + try_block.Call(); + // --- Catch block --- + frame_->EmitPush(r0); + + // Store the caught exception in the catch variable. + Variable* catch_var = node->catch_var()->var(); + ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL); + StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT); + + // Remove the exception from the stack. + frame_->Drop(); + + { VirtualFrame::RegisterAllocationScope scope(this); + VisitStatements(node->catch_block()->statements()); + } + if (frame_ != NULL) { + exit.Jump(); + } + + + // --- Try block --- + try_block.Bind(); + + frame_->PushTryHandler(TRY_CATCH_HANDLER); + int handler_height = frame_->height(); + + // Shadow the labels for all escapes from the try block, including + // returns. During shadowing, the original label is hidden as the + // LabelShadow and operations on the original actually affect the + // shadowing label. + // + // We should probably try to unify the escaping labels and the return + // label. + int nof_escapes = node->escaping_targets()->length(); + List<ShadowTarget*> shadows(1 + nof_escapes); + + // Add the shadow target for the function return. 
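+ // The return target needs shadowing because a 'return' inside the try
+ // block must unlink the handler pushed above before leaving, e.g.
+ // (roughly):
+ //
+ //   try { return f(); } catch (e) { ... }
+ //
+ // While shadowing is active, jumps to function_return_ land on the
+ // shadow instead, and the unlink code for any such escape is emitted
+ // after StopShadowing() below.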
+ static const int kReturnShadowIndex = 0; + shadows.Add(new ShadowTarget(&function_return_)); + bool function_return_was_shadowed = function_return_is_shadowed_; + function_return_is_shadowed_ = true; + ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_); + + // Add the remaining shadow targets. + for (int i = 0; i < nof_escapes; i++) { + shadows.Add(new ShadowTarget(node->escaping_targets()->at(i))); + } + + // Generate code for the statements in the try block. + { VirtualFrame::RegisterAllocationScope scope(this); + VisitStatements(node->try_block()->statements()); + } + + // Stop the introduced shadowing and count the number of required unlinks. + // After shadowing stops, the original labels are unshadowed and the + // LabelShadows represent the formerly shadowing labels. + bool has_unlinks = false; + for (int i = 0; i < shadows.length(); i++) { + shadows[i]->StopShadowing(); + has_unlinks = has_unlinks || shadows[i]->is_linked(); + } + function_return_is_shadowed_ = function_return_was_shadowed; + + // Get an external reference to the handler address. + ExternalReference handler_address(Top::k_handler_address); + + // If we can fall off the end of the try block, unlink from try chain. + if (has_valid_frame()) { + // The next handler address is on top of the frame. Unlink from + // the handler list and drop the rest of this handler from the + // frame. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + frame_->EmitPop(r1); // r0 can contain the return value. + __ mov(r3, Operand(handler_address)); + __ str(r1, MemOperand(r3)); + frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); + if (has_unlinks) { + exit.Jump(); + } + } + + // Generate unlink code for the (formerly) shadowing labels that have been + // jumped to. Deallocate each shadow target. + for (int i = 0; i < shadows.length(); i++) { + if (shadows[i]->is_linked()) { + // Unlink from try chain; + shadows[i]->Bind(); + // Because we can be jumping here (to spilled code) from unspilled + // code, we need to reestablish a spilled frame at this block. + frame_->SpillAll(); + + // Reload sp from the top handler, because some statements that we + // break from (eg, for...in) may have left stuff on the stack. + __ mov(r3, Operand(handler_address)); + __ ldr(sp, MemOperand(r3)); + frame_->Forget(frame_->height() - handler_height); + + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + frame_->EmitPop(r1); // r0 can contain the return value. + __ str(r1, MemOperand(r3)); + frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); + + if (!function_return_is_shadowed_ && i == kReturnShadowIndex) { + frame_->PrepareForReturn(); + } + shadows[i]->other_target()->Jump(); + } + } + + exit.Bind(); + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope(frame_); + Comment cmnt(masm_, "[ TryFinallyStatement"); + CodeForStatementPosition(node); + + // State: Used to keep track of reason for entering the finally + // block. Should probably be extended to hold information for + // break/continue from within the try block. + enum { FALLING, THROWING, JUMPING }; + + JumpTarget try_block; + JumpTarget finally_block; + + try_block.Call(); + + frame_->EmitPush(r0); // save exception object on the stack + // In case of thrown exceptions, this is where we continue. 
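+ // The finally block is entered with a state value in r2 recording why
+ // it was reached: FALLING for normal fall-through, THROWING for an
+ // exception (set up here), or JUMPING + i for an escape such as a
+ // return or break out of the try block, e.g. (roughly):
+ //
+ //   try { return f(); } finally { cleanup(); }   // the JUMPING case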
+ __ mov(r2, Operand(Smi::FromInt(THROWING))); + finally_block.Jump(); + + // --- Try block --- + try_block.Bind(); + + frame_->PushTryHandler(TRY_FINALLY_HANDLER); + int handler_height = frame_->height(); + + // Shadow the labels for all escapes from the try block, including + // returns. Shadowing hides the original label as the LabelShadow and + // operations on the original actually affect the shadowing label. + // + // We should probably try to unify the escaping labels and the return + // label. + int nof_escapes = node->escaping_targets()->length(); + List<ShadowTarget*> shadows(1 + nof_escapes); + + // Add the shadow target for the function return. + static const int kReturnShadowIndex = 0; + shadows.Add(new ShadowTarget(&function_return_)); + bool function_return_was_shadowed = function_return_is_shadowed_; + function_return_is_shadowed_ = true; + ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_); + + // Add the remaining shadow targets. + for (int i = 0; i < nof_escapes; i++) { + shadows.Add(new ShadowTarget(node->escaping_targets()->at(i))); + } + + // Generate code for the statements in the try block. + { VirtualFrame::RegisterAllocationScope scope(this); + VisitStatements(node->try_block()->statements()); + } + + // Stop the introduced shadowing and count the number of required unlinks. + // After shadowing stops, the original labels are unshadowed and the + // LabelShadows represent the formerly shadowing labels. + int nof_unlinks = 0; + for (int i = 0; i < shadows.length(); i++) { + shadows[i]->StopShadowing(); + if (shadows[i]->is_linked()) nof_unlinks++; + } + function_return_is_shadowed_ = function_return_was_shadowed; + + // Get an external reference to the handler address. + ExternalReference handler_address(Top::k_handler_address); + + // If we can fall off the end of the try block, unlink from the try + // chain and set the state on the frame to FALLING. + if (has_valid_frame()) { + // The next handler address is on top of the frame. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + frame_->EmitPop(r1); + __ mov(r3, Operand(handler_address)); + __ str(r1, MemOperand(r3)); + frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); + + // Fake a top of stack value (unneeded when FALLING) and set the + // state in r2, then jump around the unlink blocks if any. + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + frame_->EmitPush(r0); + __ mov(r2, Operand(Smi::FromInt(FALLING))); + if (nof_unlinks > 0) { + finally_block.Jump(); + } + } + + // Generate code to unlink and set the state for the (formerly) + // shadowing targets that have been jumped to. + for (int i = 0; i < shadows.length(); i++) { + if (shadows[i]->is_linked()) { + // If we have come from the shadowed return, the return value is + // in (a non-refcounted reference to) r0. We must preserve it + // until it is pushed. + // + // Because we can be jumping here (to spilled code) from + // unspilled code, we need to reestablish a spilled frame at + // this block. + shadows[i]->Bind(); + frame_->SpillAll(); + + // Reload sp from the top handler, because some statements that + // we break from (eg, for...in) may have left stuff on the + // stack. + __ mov(r3, Operand(handler_address)); + __ ldr(sp, MemOperand(r3)); + frame_->Forget(frame_->height() - handler_height); + + // Unlink this handler and drop it from the frame. The next + // handler address is currently on top of the frame. 
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + frame_->EmitPop(r1); + __ str(r1, MemOperand(r3)); + frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); + + if (i == kReturnShadowIndex) { + // If this label shadowed the function return, materialize the + // return value on the stack. + frame_->EmitPush(r0); + } else { + // Fake TOS for targets that shadowed breaks and continues. + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + frame_->EmitPush(r0); + } + __ mov(r2, Operand(Smi::FromInt(JUMPING + i))); + if (--nof_unlinks > 0) { + // If this is not the last unlink block, jump around the next. + finally_block.Jump(); + } + } + } + + // --- Finally block --- + finally_block.Bind(); + + // Push the state on the stack. + frame_->EmitPush(r2); + + // We keep two elements on the stack - the (possibly faked) result + // and the state - while evaluating the finally block. + // + // Generate code for the statements in the finally block. + { VirtualFrame::RegisterAllocationScope scope(this); + VisitStatements(node->finally_block()->statements()); + } + + if (has_valid_frame()) { + // Restore state and return value or faked TOS. + frame_->EmitPop(r2); + frame_->EmitPop(r0); + } + + // Generate code to jump to the right destination for all used + // formerly shadowing targets. Deallocate each shadow target. + for (int i = 0; i < shadows.length(); i++) { + if (has_valid_frame() && shadows[i]->is_bound()) { + JumpTarget* original = shadows[i]->other_target(); + __ cmp(r2, Operand(Smi::FromInt(JUMPING + i))); + if (!function_return_is_shadowed_ && i == kReturnShadowIndex) { + JumpTarget skip; + skip.Branch(ne); + frame_->PrepareForReturn(); + original->Jump(); + skip.Bind(); + } else { + original->Branch(eq); + } + } + } + + if (has_valid_frame()) { + // Check if we need to rethrow the exception. + JumpTarget exit; + __ cmp(r2, Operand(Smi::FromInt(THROWING))); + exit.Branch(ne); + + // Rethrow exception. + frame_->EmitPush(r0); + frame_->CallRuntime(Runtime::kReThrow, 1); + + // Done. + exit.Bind(); + } + ASSERT(!has_valid_frame() || frame_->height() == original_height); +} + + +void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ DebuggerStatament"); + CodeForStatementPosition(node); +#ifdef ENABLE_DEBUGGER_SUPPORT + frame_->DebugBreak(); +#endif + // Ignore the return value. + ASSERT(frame_->height() == original_height); +} + + +void CodeGenerator::InstantiateFunction( + Handle<SharedFunctionInfo> function_info, + bool pretenure) { + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. + if (scope()->is_function_scope() && + function_info->num_literals() == 0 && + !pretenure) { + FastNewClosureStub stub; + frame_->EmitPush(Operand(function_info)); + frame_->SpillAll(); + frame_->CallStub(&stub, 1); + frame_->EmitPush(r0); + } else { + // Create a new closure. + frame_->EmitPush(cp); + frame_->EmitPush(Operand(function_info)); + frame_->EmitPush(Operand(pretenure + ? Factory::true_value() + : Factory::false_value())); + frame_->CallRuntime(Runtime::kNewClosure, 3); + frame_->EmitPush(r0); + } +} + + +void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ FunctionLiteral"); + + // Build the function info and instantiate it. 
+ Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(node, script()); + if (function_info.is_null()) { + SetStackOverflow(); + ASSERT(frame_->height() == original_height); + return; + } + InstantiateFunction(function_info, node->pretenure()); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); + InstantiateFunction(node->shared_function_info(), false); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitConditional(Conditional* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Conditional"); + JumpTarget then; + JumpTarget else_; + LoadCondition(node->condition(), &then, &else_, true); + if (has_valid_frame()) { + Branch(false, &else_); + } + if (has_valid_frame() || then.is_linked()) { + then.Bind(); + Load(node->then_expression()); + } + if (else_.is_linked()) { + JumpTarget exit; + if (has_valid_frame()) exit.Jump(); + else_.Bind(); + Load(node->else_expression()); + if (exit.is_linked()) exit.Bind(); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { + if (slot->type() == Slot::LOOKUP) { + ASSERT(slot->var()->is_dynamic()); + + // JumpTargets do not yet support merging frames so the frame must be + // spilled when jumping to these targets. + JumpTarget slow; + JumpTarget done; + + // Generate fast case for loading from slots that correspond to + // local/global variables or arguments unless they are shadowed by + // eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(slot, + typeof_state, + &slow, + &done); + + slow.Bind(); + frame_->EmitPush(cp); + frame_->EmitPush(Operand(slot->var()->name())); + + if (typeof_state == INSIDE_TYPEOF) { + frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + } else { + frame_->CallRuntime(Runtime::kLoadContextSlot, 2); + } + + done.Bind(); + frame_->EmitPush(r0); + + } else { + Register scratch = VirtualFrame::scratch0(); + TypeInfo info = type_info(slot); + frame_->EmitPush(SlotOperand(slot, scratch), info); + + if (slot->var()->mode() == Variable::CONST) { + // Const slots may contain 'the hole' value (the constant hasn't been + // initialized yet) which needs to be converted into the 'undefined' + // value. + Comment cmnt(masm_, "[ Unhole const"); + Register tos = frame_->PopToRegister(); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(tos, ip); + __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq); + frame_->EmitPush(tos); + } + } +} + + +void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, + TypeofState state) { + VirtualFrame::RegisterAllocationScope scope(this); + LoadFromSlot(slot, state); + + // Bail out quickly if we're not using lazy arguments allocation. + if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; + + // ... or if the slot isn't a non-parameter arguments slot. + if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; + + // Load the loaded value from the stack into a register but leave it on the + // stack. + Register tos = frame_->Peek(); + + // If the loaded value is the sentinel that indicates that we + // haven't loaded the arguments object yet, we need to do it now. 
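+ // Illustrative case (roughly): in
+ //
+ //   function f(x) { return arguments[0]; }
+ //
+ // no arguments object is built on entry; the slot keeps the arguments
+ // marker until a use like this one forces StoreArgumentsObject below.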
+ JumpTarget exit; + __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex); + __ cmp(tos, ip); + exit.Branch(ne); + frame_->Drop(); + StoreArgumentsObject(false); + exit.Bind(); +} + + +void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { + ASSERT(slot != NULL); + VirtualFrame::RegisterAllocationScope scope(this); + if (slot->type() == Slot::LOOKUP) { + ASSERT(slot->var()->is_dynamic()); + + // For now, just do a runtime call. + frame_->EmitPush(cp); + frame_->EmitPush(Operand(slot->var()->name())); + + if (init_state == CONST_INIT) { + // Same as the case for a normal store, but ignores attribute + // (e.g. READ_ONLY) of context slot so that we can initialize + // const properties (introduced via eval("const foo = (some + // expr);")). Also, uses the current function context instead of + // the top context. + // + // Note that we must declare the foo upon entry of eval(), via a + // context slot declaration, but we cannot initialize it at the + // same time, because the const declaration may be at the end of + // the eval code (sigh...) and the const variable may have been + // used before (where its value is 'undefined'). Thus, we can only + // do the initialization when we actually encounter the expression + // and when the expression operands are defined and valid, and + // thus we need the split into 2 operations: declaration of the + // context slot followed by initialization. + frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); + } else { + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->CallRuntime(Runtime::kStoreContextSlot, 4); + } + // Storing a variable must keep the (new) value on the expression + // stack. This is necessary for compiling assignment expressions. + frame_->EmitPush(r0); + + } else { + ASSERT(!slot->var()->is_dynamic()); + Register scratch = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + + // The frame must be spilled when branching to this target. + JumpTarget exit; + + if (init_state == CONST_INIT) { + ASSERT(slot->var()->mode() == Variable::CONST); + // Only the first const initialization must be executed (the slot + // still contains 'the hole' value). When the assignment is + // executed, the code is identical to a normal store (see below). + Comment cmnt(masm_, "[ Init const"); + __ ldr(scratch, SlotOperand(slot, scratch)); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(scratch, ip); + exit.Branch(ne); + } + + // We must execute the store. Storing a variable must keep the + // (new) value on the stack. This is necessary for compiling + // assignment expressions. + // + // Note: We will reach here even with slot->var()->mode() == + // Variable::CONST because of const declarations which will + // initialize consts to 'the hole' value and by doing so, end up + // calling this code. r2 may be loaded with context; used below in + // RecordWrite. + Register tos = frame_->Peek(); + __ str(tos, SlotOperand(slot, scratch)); + if (slot->type() == Slot::CONTEXT) { + // Skip write barrier if the written value is a smi. + __ tst(tos, Operand(kSmiTagMask)); + // We don't use tos any more after here. + exit.Branch(eq); + // scratch is loaded with context when calling SlotOperand above. + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + // We need an extra register. Until we have a way to do that in the + // virtual frame we will cheat and ask for a free TOS register. 
+ Register scratch3 = frame_->GetTOSRegister(); + __ RecordWrite(scratch, Operand(offset), scratch2, scratch3); + } + // If we definitely did not jump over the assignment, we do not need + // to bind the exit label. Doing so can defeat peephole + // optimization. + if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) { + exit.Bind(); + } + } +} + + +void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot, + TypeofState typeof_state, + JumpTarget* slow) { + // Check that no extension objects have been created by calls to + // eval from the current scope to the global scope. + Register tmp = frame_->scratch0(); + Register tmp2 = frame_->scratch1(); + Register context = cp; + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + frame_->SpillAll(); + // Check that extension is NULL. + __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); + __ tst(tmp2, tmp2); + slow->Branch(ne); + } + // Load next context in chain. + __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); + __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); + context = tmp; + } + // If no outer scope calls eval, we do not need to check more + // context extensions. + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s->is_eval_scope()) { + frame_->SpillAll(); + Label next, fast; + __ Move(tmp, context); + __ bind(&next); + // Terminate at global context. + __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex); + __ cmp(tmp2, ip); + __ b(eq, &fast); + // Check that extension is NULL. + __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX)); + __ tst(tmp2, tmp2); + slow->Branch(ne); + // Load next context in chain. + __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX)); + __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); + __ b(&next); + __ bind(&fast); + } + + // Load the global object. + LoadGlobal(); + // Setup the name register and call load IC. + frame_->CallLoadIC(slot->var()->name(), + typeof_state == INSIDE_TYPEOF + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT); +} + + +void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + JumpTarget* slow, + JumpTarget* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); + frame_->SpillAll(); + done->Jump(); + + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + frame_->SpillAll(); + Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot(); + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); + if (potential_slot != NULL) { + // Generate fast case for locals that rewrite to slots. + __ ldr(r0, + ContextSlotOperandCheckExtensions(potential_slot, + r1, + r2, + slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r0, ip); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + } + done->Jump(); + } else if (rewrite != NULL) { + // Generate fast case for argument loads. 
+ Property* property = rewrite->AsProperty(); + if (property != NULL) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + Literal* key_literal = property->key()->AsLiteral(); + if (obj_proxy != NULL && + key_literal != NULL && + obj_proxy->IsArguments() && + key_literal->handle()->IsSmi()) { + // Load arguments object if there are no eval-introduced + // variables. Then load the argument from the arguments + // object using keyed load. + __ ldr(r0, + ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(), + r1, + r2, + slow)); + frame_->EmitPush(r0); + __ mov(r1, Operand(key_literal->handle())); + frame_->EmitPush(r1); + EmitKeyedLoad(); + done->Jump(); + } + } + } + } +} + + +void CodeGenerator::VisitSlot(Slot* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Slot"); + LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitVariableProxy(VariableProxy* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ VariableProxy"); + + Variable* var = node->var(); + Expression* expr = var->rewrite(); + if (expr != NULL) { + Visit(expr); + } else { + ASSERT(var->is_global()); + Reference ref(this, node); + ref.GetValue(); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitLiteral(Literal* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Literal"); + Register reg = frame_->GetTOSRegister(); + bool is_smi = node->handle()->IsSmi(); + __ mov(reg, Operand(node->handle())); + frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown()); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ RexExp Literal"); + + Register tmp = VirtualFrame::scratch0(); + // Free up a TOS register that can be used to push the literal. + Register literal = frame_->GetTOSRegister(); + + // Retrieve the literal array and check the allocated entry. + + // Load the function of this activation. + __ ldr(tmp, frame_->Function()); + + // Load the literals array of the function. + __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset)); + + // Load the literal at the ast saved index. + int literal_offset = + FixedArray::kHeaderSize + node->literal_index() * kPointerSize; + __ ldr(literal, FieldMemOperand(tmp, literal_offset)); + + JumpTarget materialized; + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(literal, ip); + // This branch locks the virtual frame at the done label to match the + // one we have here, where the literal register is not on the stack and + // nothing is spilled. + materialized.Branch(ne); + + // If the entry is undefined we call the runtime system to compute + // the literal. 
+ // literal array (0) + frame_->EmitPush(tmp); + // literal index (1) + frame_->EmitPush(Operand(Smi::FromInt(node->literal_index()))); + // RegExp pattern (2) + frame_->EmitPush(Operand(node->pattern())); + // RegExp flags (3) + frame_->EmitPush(Operand(node->flags())); + frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); + __ Move(literal, r0); + + materialized.Bind(); + + frame_->EmitPush(literal); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + frame_->EmitPush(Operand(Smi::FromInt(size))); + frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1); + // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime. + // r0 is newly allocated space. + + // Reuse literal variable with (possibly) a new register, still holding + // the materialized boilerplate. + literal = frame_->PopToRegister(r0); + + __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize); + + // Push the clone. + frame_->EmitPush(r0); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ ObjectLiteral"); + + Register literal = frame_->GetTOSRegister(); + // Load the function of this activation. + __ ldr(literal, frame_->Function()); + // Literal array. + __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset)); + frame_->EmitPush(literal); + // Literal index. + frame_->EmitPush(Operand(Smi::FromInt(node->literal_index()))); + // Constant properties. + frame_->EmitPush(Operand(node->constant_properties())); + // Should the object literal have fast elements? + frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0))); + if (node->depth() > 1) { + frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4); + } else { + frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); + } + frame_->EmitPush(r0); // save the result + + // Mark all computed expressions that are bound to a key that + // is shadowed by a later occurrence of the same key. For the + // marked expressions, no store code is emitted. + node->CalculateEmitStore(); + + for (int i = 0; i < node->properties()->length(); i++) { + // At the start of each iteration, the top of stack contains + // the newly created object literal. + ObjectLiteral::Property* property = node->properties()->at(i); + Literal* key = property->key(); + Expression* value = property->value(); + switch (property->kind()) { + case ObjectLiteral::Property::CONSTANT: + break; + case ObjectLiteral::Property::MATERIALIZED_LITERAL: + if (CompileTimeValue::IsCompileTimeValue(property->value())) break; + // else fall through + case ObjectLiteral::Property::COMPUTED: + if (key->handle()->IsSymbol()) { + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + Load(value); + if (property->emit_store()) { + frame_->PopToR0(); + // Fetch the object literal. 
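+ // Roughly, the lines below set up the StoreIC call: the value was
+ // popped to r0 above, the receiver is copied to r1, and the property
+ // name goes in r2.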
+ frame_->SpillAllButCopyTOSToR1(); + __ mov(r2, Operand(key->handle())); + frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); + } else { + frame_->Drop(); + } + break; + } + // else fall through + case ObjectLiteral::Property::PROTOTYPE: { + frame_->Dup(); + Load(key); + Load(value); + if (property->emit_store()) { + frame_->EmitPush(Operand(Smi::FromInt(NONE))); // PropertyAttributes + frame_->CallRuntime(Runtime::kSetProperty, 4); + } else { + frame_->Drop(3); + } + break; + } + case ObjectLiteral::Property::SETTER: { + frame_->Dup(); + Load(key); + frame_->EmitPush(Operand(Smi::FromInt(1))); + Load(value); + frame_->CallRuntime(Runtime::kDefineAccessor, 4); + break; + } + case ObjectLiteral::Property::GETTER: { + frame_->Dup(); + Load(key); + frame_->EmitPush(Operand(Smi::FromInt(0))); + Load(value); + frame_->CallRuntime(Runtime::kDefineAccessor, 4); + break; + } + } + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ ArrayLiteral"); + + Register tos = frame_->GetTOSRegister(); + // Load the function of this activation. + __ ldr(tos, frame_->Function()); + // Load the literals array of the function. + __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset)); + frame_->EmitPush(tos); + frame_->EmitPush(Operand(Smi::FromInt(node->literal_index()))); + frame_->EmitPush(Operand(node->constant_elements())); + int length = node->values()->length(); + if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + frame_->CallStub(&stub, 3); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2); + } else if (node->depth() > 1) { + frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { + frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); + } else { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); + frame_->CallStub(&stub, 3); + } + frame_->EmitPush(r0); // save the result + // r0: created object literal + + // Generate code to set the elements in the array that are not + // literals. + for (int i = 0; i < node->values()->length(); i++) { + Expression* value = node->values()->at(i); + + // If value is a literal the property value is already set in the + // boilerplate object. + if (value->AsLiteral() != NULL) continue; + // If value is a materialized literal the property value is already set + // in the boilerplate object if it is simple. + if (CompileTimeValue::IsCompileTimeValue(value)) continue; + + // The property must be set by generated code. + Load(value); + frame_->PopToR0(); + // Fetch the object literal. + frame_->SpillAllButCopyTOSToR1(); + + // Get the elements array. + __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); + + // Write to the indexed properties array. + int offset = i * kPointerSize + FixedArray::kHeaderSize; + __ str(r0, FieldMemOperand(r1, offset)); + + // Update the write barrier for the array address. 
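+ // Illustrative case (roughly): in
+ //
+ //   var a = [1, 2, f()];
+ //
+ // the constant elements already come with the cloned boilerplate, so
+ // only the result of f() is written here, and RecordWrite below tells
+ // the GC about the pointer stored into the elements array.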
+ __ RecordWrite(r1, Operand(offset), r3, r2); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + // Call runtime routine to allocate the catch extension object and + // assign the exception value to the catch variable. + Comment cmnt(masm_, "[ CatchExtensionObject"); + Load(node->key()); + Load(node->value()); + frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); + frame_->EmitPush(r0); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::EmitSlotAssignment(Assignment* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm(), "[ Variable Assignment"); + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + ASSERT(var != NULL); + Slot* slot = var->AsSlot(); + ASSERT(slot != NULL); + + // Evaluate the right-hand side. + if (node->is_compound()) { + // For a compound assignment the right-hand side is a binary operation + // between the current property value and the actual right-hand side. + LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); + + // Perform the binary operation. + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); + if (literal != NULL && literal->handle()->IsSmi()) { + SmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } else { + GenerateInlineSmi inline_smi = + loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI; + if (literal != NULL) { + ASSERT(!literal->handle()->IsSmi()); + inline_smi = DONT_GENERATE_INLINE_SMI; + } + Load(node->value()); + GenericBinaryOperation(node->binary_op(), + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE, + inline_smi); + } + } else { + Load(node->value()); + } + + // Perform the assignment. + if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) { + CodeForSourcePosition(node->position()); + StoreToSlot(slot, + node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm(), "[ Named Property Assignment"); + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + Property* prop = node->target()->AsProperty(); + ASSERT(var == NULL || (prop == NULL && var->is_global())); + + // Initialize name and evaluate the receiver sub-expression if necessary. If + // the receiver is trivial it is not placed on the stack at this point, but + // loaded whenever actually needed. + Handle<String> name; + bool is_trivial_receiver = false; + if (var != NULL) { + name = var->name(); + } else { + Literal* lit = prop->key()->AsLiteral(); + ASSERT_NOT_NULL(lit); + name = Handle<String>::cast(lit->handle()); + // Do not materialize the receiver on the frame if it is trivial. + is_trivial_receiver = prop->obj()->IsTrivial(); + if (!is_trivial_receiver) Load(prop->obj()); + } + + // Change to slow case in the beginning of an initialization block to + // avoid the quadratic behavior of repeatedly adding fast properties. + if (node->starts_initialization_block()) { + // Initialization block consists of assignments of the form expr.x = ..., so + // this will never be an assignment to a variable, so there must be a + // receiver object. 
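+ // Initialization blocks come from runs of assignments such as
+ // (roughly):
+ //
+ //   this.a = 1;
+ //   this.b = 2;
+ //   this.c = 3;
+ //
+ // Going to slow properties first and back to fast properties at the
+ // end avoids growing the fast-property backing store one property at
+ // a time.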
+ ASSERT_EQ(NULL, var); + if (is_trivial_receiver) { + Load(prop->obj()); + } else { + frame_->Dup(); + } + frame_->CallRuntime(Runtime::kToSlowProperties, 1); + } + + // Change to fast case at the end of an initialization block. To prepare for + // that add an extra copy of the receiver to the frame, so that it can be + // converted back to fast case after the assignment. + if (node->ends_initialization_block() && !is_trivial_receiver) { + frame_->Dup(); + } + + // Stack layout: + // [tos] : receiver (only materialized if non-trivial) + // [tos+1] : receiver if at the end of an initialization block + + // Evaluate the right-hand side. + if (node->is_compound()) { + // For a compound assignment the right-hand side is a binary operation + // between the current property value and the actual right-hand side. + if (is_trivial_receiver) { + Load(prop->obj()); + } else if (var != NULL) { + LoadGlobal(); + } else { + frame_->Dup(); + } + EmitNamedLoad(name, var != NULL); + + // Perform the binary operation. + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); + if (literal != NULL && literal->handle()->IsSmi()) { + SmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } else { + GenerateInlineSmi inline_smi = + loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI; + if (literal != NULL) { + ASSERT(!literal->handle()->IsSmi()); + inline_smi = DONT_GENERATE_INLINE_SMI; + } + Load(node->value()); + GenericBinaryOperation(node->binary_op(), + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE, + inline_smi); + } + } else { + // For non-compound assignment just load the right-hand side. + Load(node->value()); + } + + // Stack layout: + // [tos] : value + // [tos+1] : receiver (only materialized if non-trivial) + // [tos+2] : receiver if at the end of an initialization block + + // Perform the assignment. It is safe to ignore constants here. + ASSERT(var == NULL || var->mode() != Variable::CONST); + ASSERT_NE(Token::INIT_CONST, node->op()); + if (is_trivial_receiver) { + // Load the receiver and swap with the value. + Load(prop->obj()); + Register t0 = frame_->PopToRegister(); + Register t1 = frame_->PopToRegister(t0); + frame_->EmitPush(t0); + frame_->EmitPush(t1); + } + CodeForSourcePosition(node->position()); + bool is_contextual = (var != NULL); + EmitNamedStore(name, is_contextual); + frame_->EmitPush(r0); + + // Change to fast case at the end of an initialization block. + if (node->ends_initialization_block()) { + ASSERT_EQ(NULL, var); + // The argument to the runtime call is the receiver. + if (is_trivial_receiver) { + Load(prop->obj()); + } else { + // A copy of the receiver is below the value of the assignment. Swap + // the receiver and the value of the assignment expression. + Register t0 = frame_->PopToRegister(); + Register t1 = frame_->PopToRegister(t0); + frame_->EmitPush(t0); + frame_->EmitPush(t1); + } + frame_->CallRuntime(Runtime::kToFastProperties, 1); + } + + // Stack layout: + // [tos] : result + + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Keyed Property Assignment"); + Property* prop = node->target()->AsProperty(); + ASSERT_NOT_NULL(prop); + + // Evaluate the receiver subexpression. 
+ Load(prop->obj()); + + WriteBarrierCharacter wb_info; + + // Change to slow case in the beginning of an initialization block to + // avoid the quadratic behavior of repeatedly adding fast properties. + if (node->starts_initialization_block()) { + frame_->Dup(); + frame_->CallRuntime(Runtime::kToSlowProperties, 1); + } + + // Change to fast case at the end of an initialization block. To prepare for + // that add an extra copy of the receiver to the frame, so that it can be + // converted back to fast case after the assignment. + if (node->ends_initialization_block()) { + frame_->Dup(); + } + + // Evaluate the key subexpression. + Load(prop->key()); + + // Stack layout: + // [tos] : key + // [tos+1] : receiver + // [tos+2] : receiver if at the end of an initialization block + // + // Evaluate the right-hand side. + if (node->is_compound()) { + // For a compound assignment the right-hand side is a binary operation + // between the current property value and the actual right-hand side. + // Duplicate receiver and key for loading the current property value. + frame_->Dup2(); + EmitKeyedLoad(); + frame_->EmitPush(r0); + + // Perform the binary operation. + Literal* literal = node->value()->AsLiteral(); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); + if (literal != NULL && literal->handle()->IsSmi()) { + SmiOperation(node->binary_op(), + literal->handle(), + false, + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); + } else { + GenerateInlineSmi inline_smi = + loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI; + if (literal != NULL) { + ASSERT(!literal->handle()->IsSmi()); + inline_smi = DONT_GENERATE_INLINE_SMI; + } + Load(node->value()); + GenericBinaryOperation(node->binary_op(), + overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE, + inline_smi); + } + wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI; + } else { + // For non-compound assignment just load the right-hand side. + Load(node->value()); + wb_info = node->value()->AsLiteral() != NULL ? + NEVER_NEWSPACE : + (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI); + } + + // Stack layout: + // [tos] : value + // [tos+1] : key + // [tos+2] : receiver + // [tos+3] : receiver if at the end of an initialization block + + // Perform the assignment. It is safe to ignore constants here. + ASSERT(node->op() != Token::INIT_CONST); + CodeForSourcePosition(node->position()); + EmitKeyedStore(prop->key()->type(), wb_info); + frame_->EmitPush(r0); + + // Stack layout: + // [tos] : result + // [tos+1] : receiver if at the end of an initialization block + + // Change to fast case at the end of an initialization block. + if (node->ends_initialization_block()) { + // The argument to the runtime call is the extra copy of the receiver, + // which is below the value of the assignment. Swap the receiver and + // the value of the assignment expression. 
+ Register t0 = frame_->PopToRegister(); + Register t1 = frame_->PopToRegister(t0); + frame_->EmitPush(t1); + frame_->EmitPush(t0); + frame_->CallRuntime(Runtime::kToFastProperties, 1); + } + + // Stack layout: + // [tos] : result + + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitAssignment(Assignment* node) { + VirtualFrame::RegisterAllocationScope scope(this); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Assignment"); + + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + Property* prop = node->target()->AsProperty(); + + if (var != NULL && !var->is_global()) { + EmitSlotAssignment(node); + + } else if ((prop != NULL && prop->key()->IsPropertyName()) || + (var != NULL && var->is_global())) { + // Properties whose keys are property names and global variables are + // treated as named property references. We do not need to consider + // global 'this' because it is not a valid left-hand side. + EmitNamedPropertyAssignment(node); + + } else if (prop != NULL) { + // Other properties (including rewritten parameters for a function that + // uses arguments) are keyed property assignments. + EmitKeyedPropertyAssignment(node); + + } else { + // Invalid left-hand side. + Load(node->target()); + frame_->CallRuntime(Runtime::kThrowReferenceError, 1); + // The runtime call doesn't actually return but the code generator will + // still generate code and expects a certain frame height. + frame_->EmitPush(r0); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitThrow(Throw* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Throw"); + + Load(node->exception()); + CodeForSourcePosition(node->position()); + frame_->CallRuntime(Runtime::kThrow, 1); + frame_->EmitPush(r0); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitProperty(Property* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Property"); + + { Reference property(this, node); + property.GetValue(); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitCall(Call* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ Call"); + + Expression* function = node->expression(); + ZoneList<Expression*>* args = node->arguments(); + + // Standard function call. + // Check if the function is a variable or a property. + Variable* var = function->AsVariableProxy()->AsVariable(); + Property* property = function->AsProperty(); + + // ------------------------------------------------------------------------ + // Fast-case: Use inline caching. + // --- + // According to ECMA-262, section 11.2.3, page 44, the function to call + // must be resolved after the arguments have been evaluated. The IC code + // automatically handles this by loading the arguments before the function + // is resolved in cache misses (this also holds for megamorphic calls). + // ------------------------------------------------------------------------ + + if (var != NULL && var->is_possibly_eval()) { + // ---------------------------------- + // JavaScript example: 'eval(arg)' // eval is not known to be shadowed + // ---------------------------------- + + // In a call to eval, we first call %ResolvePossiblyDirectEval to + // resolve the function we need to call and the receiver of the + // call. Then we call the resolved function using the given + // arguments. 
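+ // Roughly, the code below first builds the frame for the eventual
+ // call: the loaded 'eval' function, a placeholder receiver and the
+ // arguments. It then pushes four values for the resolver itself (the
+ // function, the first argument, the receiver and the strict mode
+ // flag) and patches the resolved function and receiver back into the
+ // frame before calling.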
+ + // Prepare stack for call to resolved function. + Load(function); + + // Allocate a frame slot for the receiver. + frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex); + + // Load the arguments. + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + VirtualFrame::SpilledScope spilled_scope(frame_); + + // If we know that eval can only be shadowed by eval-introduced + // variables we attempt to load the global eval function directly + // in generated code. If we succeed, there is no need to perform a + // context lookup in the runtime system. + JumpTarget done; + if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { + ASSERT(var->AsSlot()->type() == Slot::LOOKUP); + JumpTarget slow; + // Prepare the stack for the call to + // ResolvePossiblyDirectEvalNoLookup by pushing the loaded + // function, the first argument to the eval call and the + // receiver. + LoadFromGlobalSlotCheckExtensions(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow); + frame_->EmitPush(r0); + if (arg_count > 0) { + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + frame_->EmitPush(r1); + } else { + frame_->EmitPush(r2); + } + __ ldr(r1, frame_->Receiver()); + frame_->EmitPush(r1); + + // Push the strict mode flag. + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + + frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4); + + done.Jump(); + slow.Bind(); + } + + // Prepare the stack for the call to ResolvePossiblyDirectEval by + // pushing the loaded function, the first argument to the eval + // call and the receiver. + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize)); + frame_->EmitPush(r1); + if (arg_count > 0) { + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + frame_->EmitPush(r1); + } else { + frame_->EmitPush(r2); + } + __ ldr(r1, frame_->Receiver()); + frame_->EmitPush(r1); + + // Push the strict mode flag. + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + + // Resolve the call. + frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); + + // If we generated fast-case code bind the jump-target where fast + // and slow case merge. + if (done.is_linked()) done.Bind(); + + // Touch up stack with the right values for the function and the receiver. + __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ str(r1, MemOperand(sp, arg_count * kPointerSize)); + + // Call the function. + CodeForSourcePosition(node->position()); + + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); + frame_->CallStub(&call_function, arg_count + 1); + + __ ldr(cp, frame_->Context()); + // Remove the function from the stack. + frame_->Drop(); + frame_->EmitPush(r0); + + } else if (var != NULL && !var->is_this() && var->is_global()) { + // ---------------------------------- + // JavaScript example: 'foo(1, 2, 3)' // foo is global + // ---------------------------------- + // Pass the global object as the receiver and let the IC stub + // patch the stack to use the global proxy as 'this' in the + // invoked function. + LoadGlobal(); + + // Load the arguments. + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + VirtualFrame::SpilledScope spilled_scope(frame_); + // Setup the name register and call the IC initialization code. + __ mov(r2, Operand(var->name())); + InLoopFlag in_loop = loop_nesting() > 0 ? 
IN_LOOP : NOT_IN_LOOP; + Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop); + CodeForSourcePosition(node->position()); + frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT, + arg_count + 1); + __ ldr(cp, frame_->Context()); + frame_->EmitPush(r0); + + } else if (var != NULL && var->AsSlot() != NULL && + var->AsSlot()->type() == Slot::LOOKUP) { + // ---------------------------------- + // JavaScript examples: + // + // with (obj) foo(1, 2, 3) // foo may be in obj. + // + // function f() {}; + // function g() { + // eval(...); + // f(); // f could be in extension object. + // } + // ---------------------------------- + + JumpTarget slow, done; + + // Generate fast case for loading functions from slots that + // correspond to local/global variables or arguments unless they + // are shadowed by eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(var->AsSlot(), + NOT_INSIDE_TYPEOF, + &slow, + &done); + + slow.Bind(); + // Load the function + frame_->EmitPush(cp); + frame_->EmitPush(Operand(var->name())); + frame_->CallRuntime(Runtime::kLoadContextSlot, 2); + // r0: slot value; r1: receiver + + // Load the receiver. + frame_->EmitPush(r0); // function + frame_->EmitPush(r1); // receiver + + // If fast case code has been generated, emit code to push the + // function and receiver and have the slow path jump around this + // code. + if (done.is_linked()) { + JumpTarget call; + call.Jump(); + done.Bind(); + frame_->EmitPush(r0); // function + LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver + call.Bind(); + } + + // Call the function. At this point, everything is spilled but the + // function and receiver are in r0 and r1. + CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); + frame_->EmitPush(r0); + + } else if (property != NULL) { + // Check if the key is a literal string. + Literal* literal = property->key()->AsLiteral(); + + if (literal != NULL && literal->handle()->IsSymbol()) { + // ------------------------------------------------------------------ + // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)' + // ------------------------------------------------------------------ + + Handle<String> name = Handle<String>::cast(literal->handle()); + + if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION && + name->IsEqualTo(CStrVector("apply")) && + args->length() == 2 && + args->at(1)->AsVariableProxy() != NULL && + args->at(1)->AsVariableProxy()->IsArguments()) { + // Use the optimized Function.prototype.apply that avoids + // allocating lazily allocated arguments objects. + CallApplyLazy(property->obj(), + args->at(0), + args->at(1)->AsVariableProxy(), + node->position()); + + } else { + Load(property->obj()); // Receiver. + // Load the arguments. + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + VirtualFrame::SpilledScope spilled_scope(frame_); + // Set the name register and call the IC initialization code. + __ mov(r2, Operand(name)); + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + Handle<Code> stub = + StubCache::ComputeCallInitialize(arg_count, in_loop); + CodeForSourcePosition(node->position()); + frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); + __ ldr(cp, frame_->Context()); + frame_->EmitPush(r0); + } + + } else { + // ------------------------------------------- + // JavaScript example: 'array[index](1, 2, 3)' + // ------------------------------------------- + + // Load the receiver and name of the function. 
+ Load(property->obj()); + Load(property->key()); + + if (property->is_synthetic()) { + EmitKeyedLoad(); + // Put the function below the receiver. + // Use the global receiver. + frame_->EmitPush(r0); // Function. + LoadGlobalReceiver(VirtualFrame::scratch0()); + // Call the function. + CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position()); + frame_->EmitPush(r0); + } else { + // Swap the name of the function and the receiver on the stack to follow + // the calling convention for call ICs. + Register key = frame_->PopToRegister(); + Register receiver = frame_->PopToRegister(key); + frame_->EmitPush(key); + frame_->EmitPush(receiver); + + // Load the arguments. + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + // Load the key into r2 and call the IC initialization code. + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + Handle<Code> stub = + StubCache::ComputeKeyedCallInitialize(arg_count, in_loop); + CodeForSourcePosition(node->position()); + frame_->SpillAll(); + __ ldr(r2, frame_->ElementAt(arg_count + 1)); + frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); + frame_->Drop(); // Drop the key still on the stack. + __ ldr(cp, frame_->Context()); + frame_->EmitPush(r0); + } + } + + } else { + // ---------------------------------- + // JavaScript example: 'foo(1, 2, 3)' // foo is not global + // ---------------------------------- + + // Load the function. + Load(function); + + // Pass the global proxy as the receiver. + LoadGlobalReceiver(VirtualFrame::scratch0()); + + // Call the function. + CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); + frame_->EmitPush(r0); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitCallNew(CallNew* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ CallNew"); + + // According to ECMA-262, section 11.2.2, page 44, the function + // expression in new calls must be evaluated before the + // arguments. This is different from ordinary calls, where the + // actual function to call is resolved after the arguments have been + // evaluated. + + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. + Load(node->expression()); + + // Push the arguments ("left-to-right") on the stack. + ZoneList<Expression*>* args = node->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + // Spill everything from here to simplify the implementation. + VirtualFrame::SpilledScope spilled_scope(frame_); + + // Load the argument count into r0 and the function into r1 as per + // calling convention. + __ mov(r0, Operand(arg_count)); + __ ldr(r1, frame_->ElementAt(arg_count)); + + // Call the construct call builtin that handles allocation and + // constructor invocation. + CodeForSourcePosition(node->position()); + Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall)); + frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1); + frame_->EmitPush(r0); + + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { + Register scratch = VirtualFrame::scratch0(); + JumpTarget null, function, leave, non_function_constructor; + + // Load the object into register. 
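+ // Taken together, the checks below classify the argument roughly as
+ // follows: smis and non-JS objects yield null, JS functions yield
+ // 'Function', objects whose map's constructor is not a function yield
+ // 'Object', and everything else yields the constructor's instance
+ // class name.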
+ ASSERT(args->length() == 1); + Load(args->at(0)); + Register tos = frame_->PopToRegister(); + + // If the object is a smi, we return null. + __ tst(tos, Operand(kSmiTagMask)); + null.Branch(eq); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. + __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE); + null.Branch(lt); + + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ cmp(scratch, Operand(JS_FUNCTION_TYPE)); + function.Branch(eq); + + // Check if the constructor in the map is a function. + __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset)); + __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE); + non_function_constructor.Branch(ne); + + // The tos register now contains the constructor function. Grab the + // instance class name from there. + __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(tos, + FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset)); + frame_->EmitPush(tos); + leave.Jump(); + + // Functions have class 'Function'. + function.Bind(); + __ mov(tos, Operand(Factory::function_class_symbol())); + frame_->EmitPush(tos); + leave.Jump(); + + // Objects with a non-function constructor have class 'Object'. + non_function_constructor.Bind(); + __ mov(tos, Operand(Factory::Object_symbol())); + frame_->EmitPush(tos); + leave.Jump(); + + // Non-JS objects have class null. + null.Bind(); + __ LoadRoot(tos, Heap::kNullValueRootIndex); + frame_->EmitPush(tos); + + // All done. + leave.Bind(); +} + + +void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { + Register scratch = VirtualFrame::scratch0(); + JumpTarget leave; + + ASSERT(args->length() == 1); + Load(args->at(0)); + Register tos = frame_->PopToRegister(); // tos contains object. + // if (object->IsSmi()) return the object. + __ tst(tos, Operand(kSmiTagMask)); + leave.Branch(eq); + // It is a heap object - get map. If (!object->IsJSValue()) return the object. + __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE); + leave.Branch(ne); + // Load the value. + __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset)); + leave.Bind(); + frame_->EmitPush(tos); +} + + +void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + JumpTarget leave; + + ASSERT(args->length() == 2); + Load(args->at(0)); // Load the object. + Load(args->at(1)); // Load the value. + Register value = frame_->PopToRegister(); + Register object = frame_->PopToRegister(value); + // if (object->IsSmi()) return object. + __ tst(object, Operand(kSmiTagMask)); + leave.Branch(eq); + // It is a heap object - get map. If (!object->IsJSValue()) return the object. + __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE); + leave.Branch(ne); + // Store the value. + __ str(value, FieldMemOperand(object, JSValue::kValueOffset)); + // Update the write barrier. + __ RecordWrite(object, + Operand(JSValue::kValueOffset - kHeapObjectTag), + scratch1, + scratch2); + // Leave. 
+ leave.Bind(); + frame_->EmitPush(value); +} + + +void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register reg = frame_->PopToRegister(); + __ tst(reg, Operand(kSmiTagMask)); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { + // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc. + ASSERT_EQ(args->length(), 3); +#ifdef ENABLE_LOGGING_AND_PROFILING + if (ShouldGenerateLog(args->at(0))) { + Load(args->at(1)); + Load(args->at(2)); + frame_->CallRuntime(Runtime::kLog, 2); + } +#endif + frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex); +} + + +void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register reg = frame_->PopToRegister(); + __ tst(reg, Operand(kSmiTagMask | 0x80000000u)); + cc_reg_ = eq; +} + + +// Generates the Math.pow method. +void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { + ASSERT(args->length() == 2); + Load(args->at(0)); + Load(args->at(1)); + + if (!CpuFeatures::IsSupported(VFP3)) { + frame_->CallRuntime(Runtime::kMath_pow, 2); + frame_->EmitPush(r0); + } else { + CpuFeatures::Scope scope(VFP3); + JumpTarget runtime, done; + Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return; + + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + + // Get base and exponent to registers. + Register exponent = frame_->PopToRegister(); + Register base = frame_->PopToRegister(exponent); + Register heap_number_map = no_reg; + + // Set the frame for the runtime jump target. The code below jumps to the + // jump target label so the frame needs to be established before that. + ASSERT(runtime.entry_frame() == NULL); + runtime.set_entry_frame(frame_); + + __ JumpIfNotSmi(exponent, &exponent_nonsmi); + __ JumpIfNotSmi(base, &base_nonsmi); + + heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + // Exponent is a smi and base is a smi. Get the smi value into vfp register + // d1. + __ SmiToDoubleVFPRegister(base, d1, scratch1, s0); + __ b(&powi); + + __ bind(&base_nonsmi); + // Exponent is smi and base is non smi. Get the double value from the base + // into vfp register d1. + __ ObjectToDoubleVFPRegister(base, d1, + scratch1, scratch2, heap_number_map, s0, + runtime.entry_label()); + + __ bind(&powi); + + // Load 1.0 into d0. + __ vmov(d0, 1.0); + + // Get the absolute untagged value of the exponent and use that for the + // calculation. + __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC); + // Negate if negative. + __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi); + __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative. + + // Run through all the bits in the exponent. The result is calculated in d0 + // and d1 holds base^(bit^2). + Label more_bits; + __ bind(&more_bits); + __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC); + __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set. + __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done. + __ b(ne, &more_bits); + + // If exponent is positive we are done. + __ cmp(exponent, Operand(0, RelocInfo::NONE)); + __ b(ge, &allocate_return); + + // If exponent is negative result is 1/result (d2 already holds 1.0 in that + // case). However if d0 has reached infinity this will not provide the + // correct result, so call runtime if that is the case. 
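The loop above is plain exponentiation by squaring: d1 walks through base^(2^i) while d0 accumulates the product, and a negative exponent is handled afterwards by taking the reciprocal (guarded by the infinity check that follows). A minimal standalone sketch of the same idea in plain C++, with illustrative names that are not part of this diff:

// Sketch only: computes base^exponent the way the VFP loop above does
// (result ~ d0, power ~ d1); non-finite intermediates are the cases the
// generated code hands off to the runtime.
static double PowByBits(double base, int exponent) {
  double result = 1.0;   // running product (d0)
  double power = base;   // base^(2^i)      (d1)
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if (bits & 1u) result *= power;  // multiply in this bit's contribution
    power *= power;                  // base^(2^i) -> base^(2^(i+1))
    bits >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;  // reciprocal for e < 0
}
// e.g. PowByBits(2.0, 10) == 1024.0 and PowByBits(2.0, -3) == 0.125.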
+ __ mov(scratch2, Operand(0x7FF00000)); + __ mov(scratch1, Operand(0, RelocInfo::NONE)); + __ vmov(d1, scratch1, scratch2); // Load infinity into d1. + __ VFPCompareAndSetFlags(d0, d1); + runtime.Branch(eq); // d0 reached infinity. + __ vdiv(d0, d2, d0); + __ b(&allocate_return); + + __ bind(&exponent_nonsmi); + // Special handling of raising to the power of -0.5 and 0.5. First check + // that the value is a heap number and that the lower bits (which for both + // values are zero). + heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset)); + __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset)); + __ cmp(scratch1, heap_number_map); + runtime.Branch(ne); + __ tst(scratch2, scratch2); + runtime.Branch(ne); + + // Load the higher bits (which contains the floating point exponent). + __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset)); + + // Compare exponent with -0.5. + __ cmp(scratch1, Operand(0xbfe00000)); + __ b(ne, ¬_minus_half); + + // Get the double value from the base into vfp register d0. + __ ObjectToDoubleVFPRegister(base, d0, + scratch1, scratch2, heap_number_map, s0, + runtime.entry_label(), + AVOID_NANS_AND_INFINITIES); + + // Convert -0 into +0 by adding +0. + __ vmov(d2, 0.0); + __ vadd(d0, d2, d0); + // Load 1.0 into d2. + __ vmov(d2, 1.0); + + // Calculate the reciprocal of the square root. + __ vsqrt(d0, d0); + __ vdiv(d0, d2, d0); + + __ b(&allocate_return); + + __ bind(¬_minus_half); + // Compare exponent with 0.5. + __ cmp(scratch1, Operand(0x3fe00000)); + runtime.Branch(ne); + + // Get the double value from the base into vfp register d0. + __ ObjectToDoubleVFPRegister(base, d0, + scratch1, scratch2, heap_number_map, s0, + runtime.entry_label(), + AVOID_NANS_AND_INFINITIES); + // Convert -0 into +0 by adding +0. + __ vmov(d2, 0.0); + __ vadd(d0, d2, d0); + __ vsqrt(d0, d0); + + __ bind(&allocate_return); + Register scratch3 = r5; + __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2, + heap_number_map, runtime.entry_label()); + __ mov(base, scratch3); + done.Jump(); + + runtime.Bind(); + + // Push back the arguments again for the runtime call. + frame_->EmitPush(base); + frame_->EmitPush(exponent); + frame_->CallRuntime(Runtime::kMath_pow, 2); + __ Move(base, r0); + + done.Bind(); + frame_->EmitPush(base); + } +} + + +// Generates the Math.sqrt method. +void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + + if (!CpuFeatures::IsSupported(VFP3)) { + frame_->CallRuntime(Runtime::kMath_sqrt, 1); + frame_->EmitPush(r0); + } else { + CpuFeatures::Scope scope(VFP3); + JumpTarget runtime, done; + + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + + // Get the value from the frame. + Register tos = frame_->PopToRegister(); + + // Set the frame for the runtime jump target. The code below jumps to the + // jump target label so the frame needs to be established before that. + ASSERT(runtime.entry_frame() == NULL); + runtime.set_entry_frame(frame_); + + Register heap_number_map = r6; + Register new_heap_number = r5; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + // Get the double value from the heap number into vfp register d0. 
+ __ ObjectToDoubleVFPRegister(tos, d0, + scratch1, scratch2, heap_number_map, s0, + runtime.entry_label()); + + // Calculate the square root of d0 and place result in a heap number object. + __ vsqrt(d0, d0); + __ AllocateHeapNumberWithValue(new_heap_number, + d0, + scratch1, scratch2, + heap_number_map, + runtime.entry_label()); + __ mov(tos, Operand(new_heap_number)); + done.Jump(); + + runtime.Bind(); + // Push back the argument again for the runtime call. + frame_->EmitPush(tos); + frame_->CallRuntime(Runtime::kMath_sqrt, 1); + __ Move(tos, r0); + + done.Bind(); + frame_->EmitPush(tos); + } +} + + +class DeferredStringCharCodeAt : public DeferredCode { + public: + DeferredStringCharCodeAt(Register object, + Register index, + Register scratch, + Register result) + : result_(result), + char_code_at_generator_(object, + index, + scratch, + result, + &need_conversion_, + &need_conversion_, + &index_out_of_range_, + STRING_INDEX_IS_NUMBER) {} + + StringCharCodeAtGenerator* fast_case_generator() { + return &char_code_at_generator_; + } + + virtual void Generate() { + VirtualFrameRuntimeCallHelper call_helper(frame_state()); + char_code_at_generator_.GenerateSlow(masm(), call_helper); + + __ bind(&need_conversion_); + // Move the undefined value into the result register, which will + // trigger conversion. + __ LoadRoot(result_, Heap::kUndefinedValueRootIndex); + __ jmp(exit_label()); + + __ bind(&index_out_of_range_); + // When the index is out of range, the spec requires us to return + // NaN. + __ LoadRoot(result_, Heap::kNanValueRootIndex); + __ jmp(exit_label()); + } + + private: + Register result_; + + Label need_conversion_; + Label index_out_of_range_; + + StringCharCodeAtGenerator char_code_at_generator_; +}; + + +// This generates code that performs a String.prototype.charCodeAt() call +// or returns a smi in order to trigger conversion. +void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) { + Comment(masm_, "[ GenerateStringCharCodeAt"); + ASSERT(args->length() == 2); + + Load(args->at(0)); + Load(args->at(1)); + + Register index = frame_->PopToRegister(); + Register object = frame_->PopToRegister(index); + + // We need two extra registers. + Register scratch = VirtualFrame::scratch0(); + Register result = VirtualFrame::scratch1(); + + DeferredStringCharCodeAt* deferred = + new DeferredStringCharCodeAt(object, + index, + scratch, + result); + deferred->fast_case_generator()->GenerateFast(masm_); + deferred->BindExit(); + frame_->EmitPush(result); +} + + +class DeferredStringCharFromCode : public DeferredCode { + public: + DeferredStringCharFromCode(Register code, + Register result) + : char_from_code_generator_(code, result) {} + + StringCharFromCodeGenerator* fast_case_generator() { + return &char_from_code_generator_; + } + + virtual void Generate() { + VirtualFrameRuntimeCallHelper call_helper(frame_state()); + char_from_code_generator_.GenerateSlow(masm(), call_helper); + } + + private: + StringCharFromCodeGenerator char_from_code_generator_; +}; + + +// Generates code for creating a one-char string from a char code. 
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) { + Comment(masm_, "[ GenerateStringCharFromCode"); + ASSERT(args->length() == 1); + + Load(args->at(0)); + + Register result = frame_->GetTOSRegister(); + Register code = frame_->PopToRegister(result); + + DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode( + code, result); + deferred->fast_case_generator()->GenerateFast(masm_); + deferred->BindExit(); + frame_->EmitPush(result); +} + + +class DeferredStringCharAt : public DeferredCode { + public: + DeferredStringCharAt(Register object, + Register index, + Register scratch1, + Register scratch2, + Register result) + : result_(result), + char_at_generator_(object, + index, + scratch1, + scratch2, + result, + &need_conversion_, + &need_conversion_, + &index_out_of_range_, + STRING_INDEX_IS_NUMBER) {} + + StringCharAtGenerator* fast_case_generator() { + return &char_at_generator_; + } + + virtual void Generate() { + VirtualFrameRuntimeCallHelper call_helper(frame_state()); + char_at_generator_.GenerateSlow(masm(), call_helper); + + __ bind(&need_conversion_); + // Move smi zero into the result register, which will trigger + // conversion. + __ mov(result_, Operand(Smi::FromInt(0))); + __ jmp(exit_label()); + + __ bind(&index_out_of_range_); + // When the index is out of range, the spec requires us to return + // the empty string. + __ LoadRoot(result_, Heap::kEmptyStringRootIndex); + __ jmp(exit_label()); + } + + private: + Register result_; + + Label need_conversion_; + Label index_out_of_range_; + + StringCharAtGenerator char_at_generator_; +}; + + +// This generates code that performs a String.prototype.charAt() call +// or returns a smi in order to trigger conversion. +void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) { + Comment(masm_, "[ GenerateStringCharAt"); + ASSERT(args->length() == 2); + + Load(args->at(0)); + Load(args->at(1)); + + Register index = frame_->PopToRegister(); + Register object = frame_->PopToRegister(index); + + // We need three extra registers. + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + // Use r6 without notifying the virtual frame. + Register result = r6; + + DeferredStringCharAt* deferred = + new DeferredStringCharAt(object, + index, + scratch1, + scratch2, + result); + deferred->fast_case_generator()->GenerateFast(masm_); + deferred->BindExit(); + frame_->EmitPush(result); +} + + +void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + JumpTarget answer; + // We need the CC bits to come out as not_equal in the case where the + // object is a smi. This can't be done with the usual test opcode so + // we use XOR to get the right CC bits. + Register possible_array = frame_->PopToRegister(); + Register scratch = VirtualFrame::scratch0(); + __ and_(scratch, possible_array, Operand(kSmiTagMask)); + __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC); + answer.Branch(ne); + // It is a heap object - get the map. Check if the object is a JS array. + __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE); + answer.Bind(); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + JumpTarget answer; + // We need the CC bits to come out as not_equal in the case where the + // object is a smi. This can't be done with the usual test opcode so + // we use XOR to get the right CC bits. 
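GenerateIsArray above (and GenerateIsRegExp below) rely on the same trick: with kSmiTag == 0 and kSmiTagMask == 1, AND-ing with the mask and then XOR-ing the mask back in leaves a non-zero value exactly for smis, so the resulting "ne" condition serves both as the early-out branch and as the false answer. A tiny sketch of that arithmetic (hypothetical helper, not from the diff):

#include <cstdint>

// Sketch only: the and_/eor pair as ordinary integer math.
static bool SmiTestLeavesNotEqual(uintptr_t tagged_value) {
  uintptr_t scratch = (tagged_value & 1u) ^ 1u;  // and_ with mask, then eor
  return scratch != 0;                           // "ne" <=> value is a smi
}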
+ Register possible_regexp = frame_->PopToRegister(); + Register scratch = VirtualFrame::scratch0(); + __ and_(scratch, possible_regexp, Operand(kSmiTagMask)); + __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC); + answer.Branch(ne); + // It is a heap object - get the map. Check if the object is a regexp. + __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE); + answer.Bind(); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { + // This generates a fast version of: + // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') + ASSERT(args->length() == 1); + Load(args->at(0)); + Register possible_object = frame_->PopToRegister(); + __ tst(possible_object, Operand(kSmiTagMask)); + false_target()->Branch(eq); + + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(possible_object, ip); + true_target()->Branch(eq); + + Register map_reg = VirtualFrame::scratch0(); + __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined when tested with typeof. + __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset)); + __ tst(possible_object, Operand(1 << Map::kIsUndetectable)); + false_target()->Branch(ne); + + __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); + __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE)); + false_target()->Branch(lt); + __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE)); + cc_reg_ = le; +} + + +void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) { + // This generates a fast version of: + // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' || + // typeof(arg) == function). + // It includes undetectable objects (as opposed to IsObject). + ASSERT(args->length() == 1); + Load(args->at(0)); + Register value = frame_->PopToRegister(); + __ tst(value, Operand(kSmiTagMask)); + false_target()->Branch(eq); + // Check that this is an object. + __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset)); + __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset)); + __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE)); + cc_reg_ = ge; +} + + +// Deferred code to check whether the String JavaScript object is safe for using +// default value of. This code is called after the bit caching this information +// in the map has been checked with the map for the object in the map_result_ +// register. On return the register map_result_ contains 1 for true and 0 for +// false. +class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { + public: + DeferredIsStringWrapperSafeForDefaultValueOf(Register object, + Register map_result, + Register scratch1, + Register scratch2) + : object_(object), + map_result_(map_result), + scratch1_(scratch1), + scratch2_(scratch2) { } + + virtual void Generate() { + Label false_result; + + // Check that map is loaded as expected. + if (FLAG_debug_code) { + __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ cmp(map_result_, ip); + __ Assert(eq, "Map not in expected register"); + } + + // Check for fast case object. Generate false result for slow case object. + __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset)); + __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHashTableMapRootIndex); + __ cmp(scratch1_, ip); + __ b(eq, &false_result); + + // Look for valueOf symbol in the descriptor array, and indicate false if + // found. 
The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ ldr(map_result_,
+ FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
+ __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
+ // map_result_: descriptor array
+ // scratch2_: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ add(scratch1_,
+ map_result_,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch1_,
+ scratch1_,
+ Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Calculate location of the first key name.
+ __ add(map_result_,
+ map_result_,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf, the result is false.
+ Label entry, loop;
+ // The use of ip to store the valueOf symbol assumes that it is not otherwise
+ // used in the loop below.
+ __ mov(ip, Operand(Factory::value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(scratch2_, MemOperand(map_result_, 0));
+ __ cmp(scratch2_, ip);
+ __ b(eq, &false_result);
+ __ add(map_result_, map_result_, Operand(kPointerSize));
+ __ bind(&entry);
+ __ cmp(map_result_, Operand(scratch1_));
+ __ b(ne, &loop);
+
+ // Reload map as register map_result_ was used as temporary above.
+ __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the un-modified String prototype. If not, the result is false.
+ __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
+ __ tst(scratch1_, Operand(kSmiTagMask));
+ __ b(eq, &false_result);
+ __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
+ __ ldr(scratch2_,
+ ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(scratch2_,
+ FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
+ __ ldr(scratch2_,
+ ContextOperand(
+ scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(scratch1_, scratch2_);
+ __ b(ne, &false_result);
+
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set true result.
+ __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ orr(scratch1_,
+ scratch1_,
+ Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ mov(map_result_, Operand(1));
+ __ jmp(exit_label());
+ __ bind(&false_result);
+ // Set false result.
+ __ mov(map_result_, Operand(0, RelocInfo::NONE));
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register obj = frame_->PopToRegister(); // Pop the string wrapper.
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(obj);
+ }
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ Register map_result = VirtualFrame::scratch0(); + __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset)); + __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset)); + __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + true_target()->Branch(ne); + + // We need an additional two scratch registers for the deferred code. + Register scratch1 = VirtualFrame::scratch1(); + // Use r6 without notifying the virtual frame. + Register scratch2 = r6; + + DeferredIsStringWrapperSafeForDefaultValueOf* deferred = + new DeferredIsStringWrapperSafeForDefaultValueOf( + obj, map_result, scratch1, scratch2); + deferred->Branch(eq); + deferred->BindExit(); + __ tst(map_result, Operand(map_result)); + cc_reg_ = ne; +} + + +void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { + // This generates a fast version of: + // (%_ClassOf(arg) === 'Function') + ASSERT(args->length() == 1); + Load(args->at(0)); + Register possible_function = frame_->PopToRegister(); + __ tst(possible_function, Operand(kSmiTagMask)); + false_target()->Branch(eq); + Register map_reg = VirtualFrame::scratch0(); + Register scratch = VirtualFrame::scratch1(); + __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register possible_undetectable = frame_->PopToRegister(); + __ tst(possible_undetectable, Operand(kSmiTagMask)); + false_target()->Branch(eq); + Register scratch = VirtualFrame::scratch0(); + __ ldr(scratch, + FieldMemOperand(possible_undetectable, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ tst(scratch, Operand(1 << Map::kIsUndetectable)); + cc_reg_ = ne; +} + + +void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); + + Register scratch0 = VirtualFrame::scratch0(); + Register scratch1 = VirtualFrame::scratch1(); + // Get the frame pointer for the calling frame. + __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + __ ldr(scratch1, + MemOperand(scratch0, StandardFrameConstants::kContextOffset)); + __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ ldr(scratch0, + MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq); + + // Check the marker in the calling frame. + __ ldr(scratch1, + MemOperand(scratch0, StandardFrameConstants::kMarkerOffset)); + __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); + + Register tos = frame_->GetTOSRegister(); + Register scratch0 = VirtualFrame::scratch0(); + Register scratch1 = VirtualFrame::scratch1(); + + // Check if the calling frame is an arguments adaptor frame. + __ ldr(scratch0, + MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(scratch1, + MemOperand(scratch0, StandardFrameConstants::kContextOffset)); + __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Get the number of formal parameters. + __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne); + + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. 
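Both GenerateIsConstructCall and GenerateArgumentsLength above read the caller's frame pointer, hop over an arguments-adaptor frame when its context slot holds the adaptor marker, and only then inspect the marker or length slot. A heavily simplified sketch of that walk; the slot offsets are parameters here because the StandardFrameConstants values are not restated in this hunk:

#include <cstdint>

// Sketch only: skip at most one arguments-adaptor frame, as the
// conditional ("eq") loads above do.
static intptr_t* SkipArgumentsAdaptorFrame(intptr_t* caller_fp,
                                           int context_slot,    // in words
                                           int caller_fp_slot,  // in words
                                           intptr_t adaptor_marker) {
  if (caller_fp[context_slot] == adaptor_marker) {
    // The caller is an adaptor frame: use its caller's fp instead.
    return reinterpret_cast<intptr_t*>(caller_fp[caller_fp_slot]);
  }
  return caller_fp;
}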
+ __ ldr(tos, + MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset), + eq); + + frame_->EmitPush(tos); +} + + +void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + + // Satisfy contract with ArgumentsAccessStub: + // Load the key into r1 and the formal parameters count into r0. + Load(args->at(0)); + frame_->PopToR1(); + frame_->SpillAll(); + __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters()))); + + // Call the shared stub to get to arguments[key]. + ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + frame_->CallStub(&stub, 0); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateRandomHeapNumber( + ZoneList<Expression*>* args) { + VirtualFrame::SpilledScope spilled_scope(frame_); + ASSERT(args->length() == 0); + + Label slow_allocate_heapnumber; + Label heapnumber_allocated; + + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber); + __ jmp(&heapnumber_allocated); + + __ bind(&slow_allocate_heapnumber); + // Allocate a heap number. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r4, Operand(r0)); + + __ bind(&heapnumber_allocated); + + // Convert 32 random bits in r0 to 0.(32 random bits) in a double + // by computing: + // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). + if (CpuFeatures::IsSupported(VFP3)) { + __ PrepareCallCFunction(0, r1); + __ CallCFunction(ExternalReference::random_uint32_function(), 0); + + CpuFeatures::Scope scope(VFP3); + // 0x41300000 is the top half of 1.0 x 2^20 as a double. + // Create this constant using mov/orr to avoid PC relative load. + __ mov(r1, Operand(0x41000000)); + __ orr(r1, r1, Operand(0x300000)); + // Move 0x41300000xxxxxxxx (x = random bits) to VFP. + __ vmov(d7, r0, r1); + // Move 0x4130000000000000 to VFP. + __ mov(r0, Operand(0, RelocInfo::NONE)); + __ vmov(d8, r0, r1); + // Subtract and store the result in the heap number. 
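The comment above builds the IEEE-754 pattern 1.(20 zero bits)(32 random bits) x 2^20 and then subtracts 1.0 x 2^20 (the vsub below), leaving random-bits/2^32. The same trick in portable C++, as a sketch with a made-up helper name:

#include <cstdint>
#include <cstring>

// Sketch only: 0x4130000000000000 is 1.0 * 2^20 with a zero mantissa.
// OR-ing 32 random bits into the low mantissa gives 2^20 + r/2^32, so
// subtracting 2^20 leaves a double uniformly distributed in [0, 1).
static double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bit_pattern = 0x4130000000000000ull | random_bits;
  double d;
  std::memcpy(&d, &bit_pattern, sizeof d);  // reinterpret the bit pattern
  return d - 1048576.0;                     // 1048576.0 == 2^20
}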
+ __ vsub(d7, d7, d8); + __ sub(r0, r4, Operand(kHeapObjectTag)); + __ vstr(d7, r0, HeapNumber::kValueOffset); + frame_->EmitPush(r4); + } else { + __ mov(r0, Operand(r4)); + __ PrepareCallCFunction(1, r1); + __ CallCFunction( + ExternalReference::fill_heap_number_with_random_function(), 1); + frame_->EmitPush(r0); + } +} + + +void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) { + ASSERT_EQ(2, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + + StringAddStub stub(NO_STRING_ADD_FLAGS); + frame_->SpillAll(); + frame_->CallStub(&stub, 2); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) { + ASSERT_EQ(3, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + Load(args->at(2)); + + SubStringStub stub; + frame_->SpillAll(); + frame_->CallStub(&stub, 3); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) { + ASSERT_EQ(2, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + + StringCompareStub stub; + frame_->SpillAll(); + frame_->CallStub(&stub, 2); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) { + ASSERT_EQ(4, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + Load(args->at(2)); + Load(args->at(3)); + RegExpExecStub stub; + frame_->SpillAll(); + frame_->CallStub(&stub, 4); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) { + ASSERT_EQ(3, args->length()); + + Load(args->at(0)); // Size of array, smi. + Load(args->at(1)); // "index" property value. + Load(args->at(2)); // "input" property value. + RegExpConstructResultStub stub; + frame_->SpillAll(); + frame_->CallStub(&stub, 3); + frame_->EmitPush(r0); +} + + +class DeferredSearchCache: public DeferredCode { + public: + DeferredSearchCache(Register dst, Register cache, Register key) + : dst_(dst), cache_(cache), key_(key) { + set_comment("[ DeferredSearchCache"); + } + + virtual void Generate(); + + private: + Register dst_, cache_, key_; +}; + + +void DeferredSearchCache::Generate() { + __ Push(cache_, key_); + __ CallRuntime(Runtime::kGetFromCache, 2); + __ Move(dst_, r0); +} + + +void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) { + ASSERT_EQ(2, args->length()); + + ASSERT_NE(NULL, args->at(0)->AsLiteral()); + int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); + + Handle<FixedArray> jsfunction_result_caches( + Top::global_context()->jsfunction_result_caches()); + if (jsfunction_result_caches->length() <= cache_id) { + __ Abort("Attempt to use undefined cache."); + frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex); + return; + } + + Load(args->at(1)); + + frame_->PopToR1(); + frame_->SpillAll(); + Register key = r1; // Just poped to r1 + Register result = r0; // Free, as frame has just been spilled. 
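The lookup that follows reads the cache's finger (a smi index of the most recently used key), compares that key against the one in r1, and on a hit returns the value stored in the next slot; a mismatch goes through DeferredSearchCache to the runtime. A simplified sketch of that probe; the flat key/value layout is inferred from the code, not restated by the diff:

#include <cstdint>

// Sketch only: probe a result cache laid out as a flat array of slots.
static bool ProbeResultCache(const intptr_t* cache_elements,
                             int finger_slot,   // slot holding the smi finger
                             intptr_t key,
                             intptr_t* value_out) {
  int finger = static_cast<int>(cache_elements[finger_slot] >> 1);  // untag
  if (cache_elements[finger] != key) {
    return false;  // miss: the real code falls back to Runtime::kGetFromCache
  }
  *value_out = cache_elements[finger + 1];  // the value follows its key
  return true;
}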
+ Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + + __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ ldr(scratch1, + FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset)); + __ ldr(scratch1, + ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX)); + __ ldr(scratch1, + FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id))); + + DeferredSearchCache* deferred = + new DeferredSearchCache(result, scratch1, key); + + const int kFingerOffset = + FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ ldr(result, FieldMemOperand(scratch1, kFingerOffset)); + // result now holds finger offset as a smi. + __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + // scratch2 now points to the start of fixed array elements. + __ ldr(result, + MemOperand( + scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex)); + // Note side effect of PreIndex: scratch2 now points to the key of the pair. + __ cmp(key, result); + deferred->Branch(ne); + + __ ldr(result, MemOperand(scratch2, kPointerSize)); + + deferred->BindExit(); + frame_->EmitPush(result); +} + + +void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) { + ASSERT_EQ(args->length(), 1); + + // Load the argument on the stack and jump to the runtime. + Load(args->at(0)); + + NumberToStringStub stub; + frame_->SpillAll(); + frame_->CallStub(&stub, 1); + frame_->EmitPush(r0); +} + + +class DeferredSwapElements: public DeferredCode { + public: + DeferredSwapElements(Register object, Register index1, Register index2) + : object_(object), index1_(index1), index2_(index2) { + set_comment("[ DeferredSwapElements"); + } + + virtual void Generate(); + + private: + Register object_, index1_, index2_; +}; + + +void DeferredSwapElements::Generate() { + __ push(object_); + __ push(index1_); + __ push(index2_); + __ CallRuntime(Runtime::kSwapElements, 3); +} + + +void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) { + Comment cmnt(masm_, "[ GenerateSwapElements"); + + ASSERT_EQ(3, args->length()); + + Load(args->at(0)); + Load(args->at(1)); + Load(args->at(2)); + + VirtualFrame::SpilledScope spilled_scope(frame_); + + Register index2 = r2; + Register index1 = r1; + Register object = r0; + Register tmp1 = r3; + Register tmp2 = r4; + + frame_->EmitPop(index2); + frame_->EmitPop(index1); + frame_->EmitPop(object); + + DeferredSwapElements* deferred = + new DeferredSwapElements(object, index1, index2); + + // Fetch the map and check if array is in fast case. + // Check that object doesn't require security checks and + // has no indexed interceptor. + __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE); + deferred->Branch(lt); + __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset)); + __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); + deferred->Branch(ne); + + // Check the object's elements are in fast case and writable. + __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset)); + __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(tmp2, ip); + deferred->Branch(ne); + + // Smi-tagging is equivalent to multiplying by 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + + // Check that both indices are smis. 
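The index checks that follow OR the two indices together so that a single tag test covers both, and later turn each smi index into a byte offset with one extra shift, since a smi already stores the index times two. A small sketch of that arithmetic, assuming 32-bit pointers and a one-bit smi tag:

#include <cstdint>

// Sketch only: the combined smi test and smi-index -> byte-offset step.
static bool BothAreSmis(uint32_t a, uint32_t b) {
  return ((a | b) & 1u) == 0;  // one tst covers both indices
}

static uint32_t SmiIndexToByteOffset(uint32_t smi_index, uint32_t header_size) {
  // smi_index == index << 1, so one more left shift gives index * 4 bytes.
  return header_size + (smi_index << 1);
}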
+ __ mov(tmp2, index1); + __ orr(tmp2, tmp2, index2); + __ tst(tmp2, Operand(kSmiTagMask)); + deferred->Branch(ne); + + // Check that both indices are valid. + __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset)); + __ cmp(tmp2, index1); + __ cmp(tmp2, index2, hi); + deferred->Branch(ls); + + // Bring the offsets into the fixed array in tmp1 into index1 and + // index2. + __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize)); + + // Swap elements. + Register tmp3 = object; + object = no_reg; + __ ldr(tmp3, MemOperand(tmp1, index1)); + __ ldr(tmp2, MemOperand(tmp1, index2)); + __ str(tmp3, MemOperand(tmp1, index2)); + __ str(tmp2, MemOperand(tmp1, index1)); + + Label done; + __ InNewSpace(tmp1, tmp2, eq, &done); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + __ mov(tmp2, tmp1); + __ add(index1, index1, tmp1); + __ add(index2, index2, tmp1); + __ RecordWriteHelper(tmp1, index1, tmp3); + __ RecordWriteHelper(tmp2, index2, tmp3); + __ bind(&done); + + deferred->BindExit(); + __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex); + frame_->EmitPush(tmp1); +} + + +void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) { + Comment cmnt(masm_, "[ GenerateCallFunction"); + + ASSERT(args->length() >= 2); + + int n_args = args->length() - 2; // for receiver and function. + Load(args->at(0)); // receiver + for (int i = 0; i < n_args; i++) { + Load(args->at(i + 1)); + } + Load(args->at(n_args + 1)); // function + frame_->CallJSFunction(n_args); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { + ASSERT_EQ(args->length(), 1); + Load(args->at(0)); + if (CpuFeatures::IsSupported(VFP3)) { + TranscendentalCacheStub stub(TranscendentalCache::SIN); + frame_->SpillAllButCopyTOSToR0(); + frame_->CallStub(&stub, 1); + } else { + frame_->CallRuntime(Runtime::kMath_sin, 1); + } + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { + ASSERT_EQ(args->length(), 1); + Load(args->at(0)); + if (CpuFeatures::IsSupported(VFP3)) { + TranscendentalCacheStub stub(TranscendentalCache::COS); + frame_->SpillAllButCopyTOSToR0(); + frame_->CallStub(&stub, 1); + } else { + frame_->CallRuntime(Runtime::kMath_cos, 1); + } + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) { + ASSERT_EQ(args->length(), 1); + Load(args->at(0)); + if (CpuFeatures::IsSupported(VFP3)) { + TranscendentalCacheStub stub(TranscendentalCache::LOG); + frame_->SpillAllButCopyTOSToR0(); + frame_->CallStub(&stub, 1); + } else { + frame_->CallRuntime(Runtime::kMath_log, 1); + } + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { + ASSERT(args->length() == 2); + + // Load the two objects into registers and perform the comparison. + Load(args->at(0)); + Load(args->at(1)); + Register lhs = frame_->PopToRegister(); + Register rhs = frame_->PopToRegister(lhs); + __ cmp(lhs, rhs); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) { + ASSERT(args->length() == 2); + + // Load the two objects into registers and perform the comparison. 
+ Load(args->at(0)); + Load(args->at(1)); + Register right = frame_->PopToRegister(); + Register left = frame_->PopToRegister(right); + Register tmp = frame_->scratch0(); + Register tmp2 = frame_->scratch1(); + + // Jumps to done must have the eq flag set if the test is successful + // and clear if the test has failed. + Label done; + + // Fail if either is a non-HeapObject. + __ cmp(left, Operand(right)); + __ b(eq, &done); + __ and_(tmp, left, Operand(right)); + __ eor(tmp, tmp, Operand(kSmiTagMask)); + __ tst(tmp, Operand(kSmiTagMask)); + __ b(ne, &done); + __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset)); + __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); + __ cmp(tmp2, Operand(JS_REGEXP_TYPE)); + __ b(ne, &done); + __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ cmp(tmp, Operand(tmp2)); + __ b(ne, &done); + __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset)); + __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset)); + __ cmp(tmp, tmp2); + __ bind(&done); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register value = frame_->PopToRegister(); + Register tmp = frame_->scratch0(); + __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset)); + __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask)); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register value = frame_->PopToRegister(); + + __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset)); + __ IndexFromHash(value, value); + frame_->EmitPush(value); +} + + +void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) { + ASSERT(args->length() == 2); + Load(args->at(0)); + Register value = frame_->PopToRegister(); + __ LoadRoot(value, Heap::kUndefinedValueRootIndex); + frame_->EmitPush(value); +} + + +void CodeGenerator::VisitCallRuntime(CallRuntime* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + if (CheckForInlineRuntimeCall(node)) { + ASSERT((has_cc() && frame_->height() == original_height) || + (!has_cc() && frame_->height() == original_height + 1)); + return; + } + + ZoneList<Expression*>* args = node->arguments(); + Comment cmnt(masm_, "[ CallRuntime"); + Runtime::Function* function = node->function(); + + if (function == NULL) { + // Prepare stack for calling JS runtime function. + // Push the builtins object found in the current global object. + Register scratch = VirtualFrame::scratch0(); + __ ldr(scratch, GlobalObjectOperand()); + Register builtins = frame_->GetTOSRegister(); + __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset)); + frame_->EmitPush(builtins); + } + + // Push the arguments ("left-to-right"). + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Load(args->at(i)); + } + + VirtualFrame::SpilledScope spilled_scope(frame_); + + if (function == NULL) { + // Call the JS runtime function. + __ mov(r2, Operand(node->name())); + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop); + frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); + __ ldr(cp, frame_->Context()); + frame_->EmitPush(r0); + } else { + // Call the C runtime function. 
+ frame_->CallRuntime(function, arg_count); + frame_->EmitPush(r0); + } + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ UnaryOperation"); + + Token::Value op = node->op(); + + if (op == Token::NOT) { + LoadCondition(node->expression(), false_target(), true_target(), true); + // LoadCondition may (and usually does) leave a test and branch to + // be emitted by the caller. In that case, negate the condition. + if (has_cc()) cc_reg_ = NegateCondition(cc_reg_); + + } else if (op == Token::DELETE) { + Property* property = node->expression()->AsProperty(); + Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); + if (property != NULL) { + Load(property->obj()); + Load(property->key()); + frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag()))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); + frame_->EmitPush(r0); + + } else if (variable != NULL) { + // Delete of an unqualified identifier is disallowed in strict mode + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); + Slot* slot = variable->AsSlot(); + if (variable->is_global()) { + LoadGlobal(); + frame_->EmitPush(Operand(variable->name())); + frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode))); + frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3); + frame_->EmitPush(r0); + + } else if (slot != NULL && slot->type() == Slot::LOOKUP) { + // Delete from the context holding the named variable. + frame_->EmitPush(cp); + frame_->EmitPush(Operand(variable->name())); + frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); + frame_->EmitPush(r0); + + } else { + // Default: Result of deleting non-global, not dynamically + // introduced variables is false. + frame_->EmitPushRoot(Heap::kFalseValueRootIndex); + } + + } else { + // Default: Result of deleting expressions is true. + Load(node->expression()); // may have side-effects + frame_->Drop(); + frame_->EmitPushRoot(Heap::kTrueValueRootIndex); + } + + } else if (op == Token::TYPEOF) { + // Special case for loading the typeof expression; see comment on + // LoadTypeofExpression(). + LoadTypeofExpression(node->expression()); + frame_->CallRuntime(Runtime::kTypeof, 1); + frame_->EmitPush(r0); // r0 has result + + } else { + bool can_overwrite = node->expression()->ResultOverwriteAllowed(); + UnaryOverwriteMode overwrite = + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; + + bool no_negative_zero = node->expression()->no_negative_zero(); + Load(node->expression()); + switch (op) { + case Token::NOT: + case Token::DELETE: + case Token::TYPEOF: + UNREACHABLE(); // handled above + break; + + case Token::SUB: { + frame_->PopToR0(); + GenericUnaryOpStub stub( + Token::SUB, + overwrite, + NO_UNARY_FLAGS, + no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); + frame_->CallStub(&stub, 0); + frame_->EmitPush(r0); // r0 has result + break; + } + + case Token::BIT_NOT: { + Register tos = frame_->PopToRegister(); + JumpTarget not_smi_label; + JumpTarget continue_label; + // Smi check. + __ tst(tos, Operand(kSmiTagMask)); + not_smi_label.Branch(ne); + + __ mvn(tos, Operand(tos)); + __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag. + frame_->EmitPush(tos); + // The fast case is the first to jump to the continue label, so it gets + // to decide the virtual frame layout. 
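The smi fast case above computes ~n without ever untagging: a tagged smi is 2*n, mvn yields -2n-1, and bic clears the now-set tag bit, leaving -2n-2 = 2*(~n), which is already the tagged result. A quick sketch of that identity (hypothetical helper):

#include <cstdint>

// Sketch only: Token::BIT_NOT on a tagged smi via mvn + bic.
static int32_t SmiBitNot(int32_t tagged_smi) {
  uint32_t inverted = ~static_cast<uint32_t>(tagged_smi);  // mvn
  uint32_t tag_cleared = inverted & ~1u;                   // bic the tag bit
  return static_cast<int32_t>(tag_cleared);                // == (~n) << 1
}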
+ continue_label.Jump(); + + not_smi_label.Bind(); + frame_->SpillAll(); + __ Move(r0, tos); + GenericUnaryOpStub stub(Token::BIT_NOT, + overwrite, + NO_UNARY_SMI_CODE_IN_STUB); + frame_->CallStub(&stub, 0); + frame_->EmitPush(r0); + + continue_label.Bind(); + break; + } + + case Token::VOID: + frame_->Drop(); + frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex); + break; + + case Token::ADD: { + Register tos = frame_->Peek(); + // Smi check. + JumpTarget continue_label; + __ tst(tos, Operand(kSmiTagMask)); + continue_label.Branch(eq); + + frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1); + frame_->EmitPush(r0); + + continue_label.Bind(); + break; + } + default: + UNREACHABLE(); + } + } + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height) || + (!has_cc() && frame_->height() == original_height + 1)); +} + + +class DeferredCountOperation: public DeferredCode { + public: + DeferredCountOperation(Register value, + bool is_increment, + bool is_postfix, + int target_size) + : value_(value), + is_increment_(is_increment), + is_postfix_(is_postfix), + target_size_(target_size) {} + + virtual void Generate() { + VirtualFrame copied_frame(*frame_state()->frame()); + + Label slow; + // Check for smi operand. + __ tst(value_, Operand(kSmiTagMask)); + __ b(ne, &slow); + + // Revert optimistic increment/decrement. + if (is_increment_) { + __ sub(value_, value_, Operand(Smi::FromInt(1))); + } else { + __ add(value_, value_, Operand(Smi::FromInt(1))); + } + + // Slow case: Convert to number. At this point the + // value to be incremented is in the value register.. + __ bind(&slow); + + // Convert the operand to a number. + copied_frame.EmitPush(value_); + + copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1); + + if (is_postfix_) { + // Postfix: store to result (on the stack). + __ str(r0, MemOperand(sp, target_size_ * kPointerSize)); + } + + copied_frame.EmitPush(r0); + copied_frame.EmitPush(Operand(Smi::FromInt(1))); + + if (is_increment_) { + copied_frame.CallRuntime(Runtime::kNumberAdd, 2); + } else { + copied_frame.CallRuntime(Runtime::kNumberSub, 2); + } + + __ Move(value_, r0); + + copied_frame.MergeTo(frame_state()->frame()); + } + + private: + Register value_; + bool is_increment_; + bool is_postfix_; + int target_size_; +}; + + +void CodeGenerator::VisitCountOperation(CountOperation* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ CountOperation"); + VirtualFrame::RegisterAllocationScope scope(this); + + bool is_postfix = node->is_postfix(); + bool is_increment = node->op() == Token::INC; + + Variable* var = node->expression()->AsVariableProxy()->AsVariable(); + bool is_const = (var != NULL && var->mode() == Variable::CONST); + bool is_slot = (var != NULL && var->mode() == Variable::VAR); + + if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) { + // The type info declares that this variable is always a Smi. That + // means it is a Smi both before and after the increment/decrement. + // Lets make use of that to make a very minimal count. + Reference target(this, node->expression(), !is_const); + ASSERT(!target.is_illegal()); + target.GetValue(); // Pushes the value. 
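The main count-operation path below adds Smi::FromInt(1), i.e. the raw value 2, directly to the tagged value and lets the overflow ("vs") branch divert to DeferredCountOperation above, which reverts the add and calls the runtime. A sketch of that optimistic step with an explicit overflow check, assuming 32-bit tagged values:

#include <cstdint>

// Sketch only: optimistic tagged increment/decrement; returning false
// corresponds to taking the deferred (slow) path.
static bool OptimisticSmiCount(int32_t tagged, bool is_increment,
                               int32_t* result_out) {
  int64_t wide = static_cast<int64_t>(tagged) + (is_increment ? 2 : -2);
  if (wide < INT32_MIN || wide > INT32_MAX) return false;  // "vs" in the code
  *result_out = static_cast<int32_t>(wide);
  return true;
}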
+ Register value = frame_->PopToRegister(); + if (is_postfix) frame_->EmitPush(value); + if (is_increment) { + __ add(value, value, Operand(Smi::FromInt(1))); + } else { + __ sub(value, value, Operand(Smi::FromInt(1))); + } + frame_->EmitPush(value); + target.SetValue(NOT_CONST_INIT, LIKELY_SMI); + if (is_postfix) frame_->Pop(); + ASSERT_EQ(original_height + 1, frame_->height()); + return; + } + + // If it's a postfix expression and its result is not ignored and the + // reference is non-trivial, then push a placeholder on the stack now + // to hold the result of the expression. + bool placeholder_pushed = false; + if (!is_slot && is_postfix) { + frame_->EmitPush(Operand(Smi::FromInt(0))); + placeholder_pushed = true; + } + + // A constant reference is not saved to, so a constant reference is not a + // compound assignment reference. + { Reference target(this, node->expression(), !is_const); + if (target.is_illegal()) { + // Spoof the virtual frame to have the expected height (one higher + // than on entry). + if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0))); + ASSERT_EQ(original_height + 1, frame_->height()); + return; + } + + // This pushes 0, 1 or 2 words on the object to be used later when updating + // the target. It also pushes the current value of the target. + target.GetValue(); + + bool value_is_known_smi = frame_->KnownSmiAt(0); + Register value = frame_->PopToRegister(); + + // Postfix: Store the old value as the result. + if (placeholder_pushed) { + frame_->SetElementAt(value, target.size()); + } else if (is_postfix) { + frame_->EmitPush(value); + __ mov(VirtualFrame::scratch0(), value); + value = VirtualFrame::scratch0(); + } + + // We can't use any type information here since the virtual frame from the + // deferred code may have lost information and we can't merge a virtual + // frame with less specific type knowledge to a virtual frame with more + // specific knowledge that has already used that specific knowledge to + // generate code. + frame_->ForgetTypeInfo(); + + // The constructor here will capture the current virtual frame and use it to + // merge to after the deferred code has run. No virtual frame changes are + // allowed from here until the 'BindExit' below. + DeferredCode* deferred = + new DeferredCountOperation(value, + is_increment, + is_postfix, + target.size()); + if (!value_is_known_smi) { + // Check for smi operand. + __ tst(value, Operand(kSmiTagMask)); + + deferred->Branch(ne); + } + + // Perform optimistic increment/decrement. + if (is_increment) { + __ add(value, value, Operand(Smi::FromInt(1)), SetCC); + } else { + __ sub(value, value, Operand(Smi::FromInt(1)), SetCC); + } + + // If increment/decrement overflows, go to deferred code. + deferred->Branch(vs); + + deferred->BindExit(); + + // Store the new value in the target if not const. + // At this point the answer is in the value register. + frame_->EmitPush(value); + // Set the target with the result, leaving the result on + // top of the stack. Removes the target from the stack if + // it has a non-zero size. + if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI); + } + + // Postfix: Discard the new value and use the old. + if (is_postfix) frame_->Pop(); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) { + // According to ECMA-262 section 11.11, page 58, the binary logical + // operators must yield the result of one of the two expressions + // before any ToBoolean() conversions. 
This means that the value + // produced by a && or || operator is not necessarily a boolean. + + // NOTE: If the left hand side produces a materialized value (not in + // the CC register), we force the right hand side to do the + // same. This is necessary because we may have to branch to the exit + // after evaluating the left hand side (due to the shortcut + // semantics), but the compiler must (statically) know if the result + // of compiling the binary operation is materialized or not. + if (node->op() == Token::AND) { + JumpTarget is_true; + LoadCondition(node->left(), &is_true, false_target(), false); + if (has_valid_frame() && !has_cc()) { + // The left-hand side result is on top of the virtual frame. + JumpTarget pop_and_continue; + JumpTarget exit; + + frame_->Dup(); + // Avoid popping the result if it converts to 'false' using the + // standard ToBoolean() conversion as described in ECMA-262, + // section 9.2, page 30. + ToBoolean(&pop_and_continue, &exit); + Branch(false, &exit); + + // Pop the result of evaluating the first part. + pop_and_continue.Bind(); + frame_->Pop(); + + // Evaluate right side expression. + is_true.Bind(); + Load(node->right()); + + // Exit (always with a materialized value). + exit.Bind(); + } else if (has_cc() || is_true.is_linked()) { + // The left-hand side is either (a) partially compiled to + // control flow with a final branch left to emit or (b) fully + // compiled to control flow and possibly true. + if (has_cc()) { + Branch(false, false_target()); + } + is_true.Bind(); + LoadCondition(node->right(), true_target(), false_target(), false); + } else { + // Nothing to do. + ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked()); + } + + } else { + ASSERT(node->op() == Token::OR); + JumpTarget is_false; + LoadCondition(node->left(), true_target(), &is_false, false); + if (has_valid_frame() && !has_cc()) { + // The left-hand side result is on top of the virtual frame. + JumpTarget pop_and_continue; + JumpTarget exit; + + frame_->Dup(); + // Avoid popping the result if it converts to 'true' using the + // standard ToBoolean() conversion as described in ECMA-262, + // section 9.2, page 30. + ToBoolean(&exit, &pop_and_continue); + Branch(true, &exit); + + // Pop the result of evaluating the first part. + pop_and_continue.Bind(); + frame_->Pop(); + + // Evaluate right side expression. + is_false.Bind(); + Load(node->right()); + + // Exit (always with a materialized value). + exit.Bind(); + } else if (has_cc() || is_false.is_linked()) { + // The left-hand side is either (a) partially compiled to + // control flow with a final branch left to emit or (b) fully + // compiled to control flow and possibly false. + if (has_cc()) { + Branch(true, true_target()); + } + is_false.Bind(); + LoadCondition(node->right(), true_target(), false_target(), false); + } else { + // Nothing to do. + ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked()); + } + } +} + + +void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ BinaryOperation"); + + if (node->op() == Token::AND || node->op() == Token::OR) { + GenerateLogicalBooleanOperation(node); + } else { + // Optimize for the case where (at least) one of the expressions + // is a literal small integer. 
+ Literal* lliteral = node->left()->AsLiteral(); + Literal* rliteral = node->right()->AsLiteral(); + // NOTE: The code below assumes that the slow cases (calls to runtime) + // never return a constant/immutable object. + bool overwrite_left = node->left()->ResultOverwriteAllowed(); + bool overwrite_right = node->right()->ResultOverwriteAllowed(); + + if (rliteral != NULL && rliteral->handle()->IsSmi()) { + VirtualFrame::RegisterAllocationScope scope(this); + Load(node->left()); + if (frame_->KnownSmiAt(0)) overwrite_left = false; + SmiOperation(node->op(), + rliteral->handle(), + false, + overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); + } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { + VirtualFrame::RegisterAllocationScope scope(this); + Load(node->right()); + if (frame_->KnownSmiAt(0)) overwrite_right = false; + SmiOperation(node->op(), + lliteral->handle(), + true, + overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); + } else { + GenerateInlineSmi inline_smi = + loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI; + if (lliteral != NULL) { + ASSERT(!lliteral->handle()->IsSmi()); + inline_smi = DONT_GENERATE_INLINE_SMI; + } + if (rliteral != NULL) { + ASSERT(!rliteral->handle()->IsSmi()); + inline_smi = DONT_GENERATE_INLINE_SMI; + } + VirtualFrame::RegisterAllocationScope scope(this); + OverwriteMode overwrite_mode = NO_OVERWRITE; + if (overwrite_left) { + overwrite_mode = OVERWRITE_LEFT; + } else if (overwrite_right) { + overwrite_mode = OVERWRITE_RIGHT; + } + Load(node->left()); + Load(node->right()); + GenericBinaryOperation(node->op(), overwrite_mode, inline_smi); + } + } + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height) || + (!has_cc() && frame_->height() == original_height + 1)); +} + + +void CodeGenerator::VisitThisFunction(ThisFunction* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + frame_->EmitPush(MemOperand(frame_->Function())); + ASSERT_EQ(original_height + 1, frame_->height()); +} + + +void CodeGenerator::VisitCompareOperation(CompareOperation* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ CompareOperation"); + + VirtualFrame::RegisterAllocationScope nonspilled_scope(this); + + // Get the expressions from the node. + Expression* left = node->left(); + Expression* right = node->right(); + Token::Value op = node->op(); + + // To make typeof testing for natives implemented in JavaScript really + // efficient, we generate special code for expressions of the form: + // 'typeof <expression> == <string>'. + UnaryOperation* operation = left->AsUnaryOperation(); + if ((op == Token::EQ || op == Token::EQ_STRICT) && + (operation != NULL && operation->op() == Token::TYPEOF) && + (right->AsLiteral() != NULL && + right->AsLiteral()->handle()->IsString())) { + Handle<String> check(String::cast(*right->AsLiteral()->handle())); + + // Load the operand, move it to a register. 
+ LoadTypeofExpression(operation->expression()); + Register tos = frame_->PopToRegister(); + + Register scratch = VirtualFrame::scratch0(); + + if (check->Equals(Heap::number_symbol())) { + __ tst(tos, Operand(kSmiTagMask)); + true_target()->Branch(eq); + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(tos, ip); + cc_reg_ = eq; + + } else if (check->Equals(Heap::string_symbol())) { + __ tst(tos, Operand(kSmiTagMask)); + false_target()->Branch(eq); + + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + + // It can be an undetectable string object. + __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); + false_target()->Branch(eq); + + __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); + cc_reg_ = lt; + + } else if (check->Equals(Heap::boolean_symbol())) { + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(tos, ip); + true_target()->Branch(eq); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(tos, ip); + cc_reg_ = eq; + + } else if (check->Equals(Heap::undefined_symbol())) { + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(tos, ip); + true_target()->Branch(eq); + + __ tst(tos, Operand(kSmiTagMask)); + false_target()->Branch(eq); + + // It can be an undetectable object. + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); + + cc_reg_ = eq; + + } else if (check->Equals(Heap::function_symbol())) { + __ tst(tos, Operand(kSmiTagMask)); + false_target()->Branch(eq); + Register map_reg = scratch; + __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE); + true_target()->Branch(eq); + // Regular expressions are callable so typeof == 'function'. + __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE); + cc_reg_ = eq; + + } else if (check->Equals(Heap::object_symbol())) { + __ tst(tos, Operand(kSmiTagMask)); + false_target()->Branch(eq); + + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(tos, ip); + true_target()->Branch(eq); + + Register map_reg = scratch; + __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE); + false_target()->Branch(eq); + + // It can be an undetectable object. + __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); + false_target()->Branch(eq); + + __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); + __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE)); + false_target()->Branch(lt); + __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE)); + cc_reg_ = le; + + } else { + // Uncommon case: typeof testing against a string literal that is + // never returned from the typeof operator. 
+ false_target()->Jump(); + } + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height)); + return; + } + + switch (op) { + case Token::EQ: + Comparison(eq, left, right, false); + break; + + case Token::LT: + Comparison(lt, left, right); + break; + + case Token::GT: + Comparison(gt, left, right); + break; + + case Token::LTE: + Comparison(le, left, right); + break; + + case Token::GTE: + Comparison(ge, left, right); + break; + + case Token::EQ_STRICT: + Comparison(eq, left, right, true); + break; + + case Token::IN: { + Load(left); + Load(right); + frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2); + frame_->EmitPush(r0); + break; + } + + case Token::INSTANCEOF: { + Load(left); + Load(right); + InstanceofStub stub(InstanceofStub::kNoFlags); + frame_->CallStub(&stub, 2); + // At this point if instanceof succeeded then r0 == 0. + __ tst(r0, Operand(r0)); + cc_reg_ = eq; + break; + } + + default: + UNREACHABLE(); + } + ASSERT((has_cc() && frame_->height() == original_height) || + (!has_cc() && frame_->height() == original_height + 1)); +} + + +void CodeGenerator::VisitCompareToNull(CompareToNull* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ CompareToNull"); + + Load(node->expression()); + Register tos = frame_->PopToRegister(); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(tos, ip); + + // The 'null' value is only equal to 'undefined' if using non-strict + // comparisons. + if (!node->is_strict()) { + true_target()->Branch(eq); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(tos, Operand(ip)); + true_target()->Branch(eq); + + __ tst(tos, Operand(kSmiTagMask)); + false_target()->Branch(eq); + + // It can be an undetectable object. + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); + } + + cc_reg_ = eq; + ASSERT(has_cc() && frame_->height() == original_height); +} + + +class DeferredReferenceGetNamedValue: public DeferredCode { + public: + explicit DeferredReferenceGetNamedValue(Register receiver, + Handle<String> name, + bool is_contextual) + : receiver_(receiver), + name_(name), + is_contextual_(is_contextual), + is_dont_delete_(false) { + set_comment(is_contextual + ? "[ DeferredReferenceGetNamedValue (contextual)" + : "[ DeferredReferenceGetNamedValue"); + } + + virtual void Generate(); + + void set_is_dont_delete(bool value) { + ASSERT(is_contextual_); + is_dont_delete_ = value; + } + + private: + Register receiver_; + Handle<String> name_; + bool is_contextual_; + bool is_dont_delete_; +}; + + +// Convention for this is that on entry the receiver is in a register that +// is not used by the stack. On exit the answer is found in that same +// register and the stack has the same height. +void DeferredReferenceGetNamedValue::Generate() { +#ifdef DEBUG + int expected_height = frame_state()->frame()->height(); +#endif + VirtualFrame copied_frame(*frame_state()->frame()); + copied_frame.SpillAll(); + + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2)); + __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2); + __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2); + + // Ensure receiver in r0 and name in r2 to match load ic calling convention. 
+ __ Move(r0, receiver_); + __ mov(r2, Operand(name_)); + + // The rest of the instructions in the deferred code must be together. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + RelocInfo::Mode mode = is_contextual_ + ? RelocInfo::CODE_TARGET_CONTEXT + : RelocInfo::CODE_TARGET; + __ Call(ic, mode); + // We must mark the code just after the call with the correct marker. + MacroAssembler::NopMarkerTypes code_marker; + if (is_contextual_) { + code_marker = is_dont_delete_ + ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE + : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT; + } else { + code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED; + } + __ MarkCode(code_marker); + + // At this point the answer is in r0. We move it to the expected register + // if necessary. + __ Move(receiver_, r0); + + // Now go back to the frame that we entered with. This will not overwrite + // the receiver register since that register was not in use when we came + // in. The instructions emitted by this merge are skipped over by the + // inline load patching mechanism when looking for the branch instruction + // that tells it where the code to patch is. + copied_frame.MergeTo(frame_state()->frame()); + + // Block the constant pool for one more instruction after leaving this + // constant pool block scope to include the branch instruction ending the + // deferred code. + __ BlockConstPoolFor(1); + } + ASSERT_EQ(expected_height, frame_state()->frame()->height()); +} + + +class DeferredReferenceGetKeyedValue: public DeferredCode { + public: + DeferredReferenceGetKeyedValue(Register key, Register receiver) + : key_(key), receiver_(receiver) { + set_comment("[ DeferredReferenceGetKeyedValue"); + } + + virtual void Generate(); + + private: + Register key_; + Register receiver_; +}; + + +// Takes key and register in r0 and r1 or vice versa. Returns result +// in r0. +void DeferredReferenceGetKeyedValue::Generate() { + ASSERT((key_.is(r0) && receiver_.is(r1)) || + (key_.is(r1) && receiver_.is(r0))); + + VirtualFrame copied_frame(*frame_state()->frame()); + copied_frame.SpillAll(); + + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2); + __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2); + + // Ensure key in r0 and receiver in r1 to match keyed load ic calling + // convention. + if (key_.is(r1)) { + __ Swap(r0, r1, ip); + } + + // The rest of the instructions in the deferred code must be together. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + // Call keyed load IC. It has the arguments key and receiver in r0 and r1. + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // The call must be followed by a nop instruction to indicate that the + // keyed load has been inlined. + __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED); + + // Now go back to the frame that we entered with. This will not overwrite + // the receiver or key registers since they were not in use when we came + // in. The instructions emitted by this merge are skipped over by the + // inline load patching mechanism when looking for the branch instruction + // that tells it where the code to patch is. 
+ copied_frame.MergeTo(frame_state()->frame()); + + // Block the constant pool for one more instruction after leaving this + // constant pool block scope to include the branch instruction ending the + // deferred code. + __ BlockConstPoolFor(1); + } +} + + +class DeferredReferenceSetKeyedValue: public DeferredCode { + public: + DeferredReferenceSetKeyedValue(Register value, + Register key, + Register receiver, + StrictModeFlag strict_mode) + : value_(value), + key_(key), + receiver_(receiver), + strict_mode_(strict_mode) { + set_comment("[ DeferredReferenceSetKeyedValue"); + } + + virtual void Generate(); + + private: + Register value_; + Register key_; + Register receiver_; + StrictModeFlag strict_mode_; +}; + + +void DeferredReferenceSetKeyedValue::Generate() { + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2); + __ IncrementCounter( + &Counters::keyed_store_inline_miss, 1, scratch1, scratch2); + + // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic + // calling convention. + if (value_.is(r1)) { + __ Swap(r0, r1, ip); + } + ASSERT(receiver_.is(r2)); + + // The rest of the instructions in the deferred code must be together. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + // Call keyed store IC. It has the arguments value, key and receiver in r0, + // r1 and r2. + Handle<Code> ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // The call must be followed by a nop instruction to indicate that the + // keyed store has been inlined. + __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED); + + // Block the constant pool for one more instruction after leaving this + // constant pool block scope to include the branch instruction ending the + // deferred code. + __ BlockConstPoolFor(1); + } +} + + +class DeferredReferenceSetNamedValue: public DeferredCode { + public: + DeferredReferenceSetNamedValue(Register value, + Register receiver, + Handle<String> name, + StrictModeFlag strict_mode) + : value_(value), + receiver_(receiver), + name_(name), + strict_mode_(strict_mode) { + set_comment("[ DeferredReferenceSetNamedValue"); + } + + virtual void Generate(); + + private: + Register value_; + Register receiver_; + Handle<String> name_; + StrictModeFlag strict_mode_; +}; + + +// Takes value in r0, receiver in r1 and returns the result (the +// value) in r0. +void DeferredReferenceSetNamedValue::Generate() { + // Record the entry frame and spill. + VirtualFrame copied_frame(*frame_state()->frame()); + copied_frame.SpillAll(); + + // Ensure value in r0, receiver in r1 to match store ic calling + // convention. + ASSERT(value_.is(r0) && receiver_.is(r1)); + __ mov(r2, Operand(name_)); + + // The rest of the instructions in the deferred code must be together. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + // Call keyed store IC. It has the arguments value, key and receiver in r0, + // r1 and r2. + Handle<Code> ic(Builtins::builtin( + (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // The call must be followed by a nop instruction to indicate that the + // named store has been inlined. + __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED); + + // Go back to the frame we entered with. 
The instructions + // generated by this merge are skipped over by the inline store + // patching mechanism when looking for the branch instruction that + // tells it where the code to patch is. + copied_frame.MergeTo(frame_state()->frame()); + + // Block the constant pool for one more instruction after leaving this + // constant pool block scope to include the branch instruction ending the + // deferred code. + __ BlockConstPoolFor(1); + } +} + + +// Consumes the top of stack (the receiver) and pushes the result instead. +void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { + bool contextual_load_in_builtin = + is_contextual && + (Bootstrapper::IsActive() || + (!info_->closure().is_null() && info_->closure()->IsBuiltin())); + + if (scope()->is_global_scope() || + loop_nesting() == 0 || + contextual_load_in_builtin) { + Comment cmnt(masm(), "[ Load from named Property"); + // Setup the name register and call load IC. + frame_->CallLoadIC(name, + is_contextual + ? RelocInfo::CODE_TARGET_CONTEXT + : RelocInfo::CODE_TARGET); + frame_->EmitPush(r0); // Push answer. + } else { + // Inline the in-object property case. + Comment cmnt(masm(), is_contextual + ? "[ Inlined contextual property load" + : "[ Inlined named property load"); + + // Counter will be decremented in the deferred code. Placed here to avoid + // having it in the instruction stream below where patching will occur. + if (is_contextual) { + __ IncrementCounter(&Counters::named_load_global_inline, 1, + frame_->scratch0(), frame_->scratch1()); + } else { + __ IncrementCounter(&Counters::named_load_inline, 1, + frame_->scratch0(), frame_->scratch1()); + } + + // The following instructions are the inlined load of an in-object property. + // Parts of this code is patched, so the exact instructions generated needs + // to be fixed. Therefore the instruction pool is blocked when generating + // this code + + // Load the receiver from the stack. + Register receiver = frame_->PopToRegister(); + + DeferredReferenceGetNamedValue* deferred = + new DeferredReferenceGetNamedValue(receiver, name, is_contextual); + + bool is_dont_delete = false; + if (is_contextual) { + if (!info_->closure().is_null()) { + // When doing lazy compilation we can check if the global cell + // already exists and use its "don't delete" status as a hint. + AssertNoAllocation no_gc; + v8::internal::GlobalObject* global_object = + info_->closure()->context()->global(); + LookupResult lookup; + global_object->LocalLookupRealNamedProperty(*name, &lookup); + if (lookup.IsProperty() && lookup.type() == NORMAL) { + ASSERT(lookup.holder() == global_object); + ASSERT(global_object->property_dictionary()->ValueAt( + lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell()); + is_dont_delete = lookup.IsDontDelete(); + } + } + if (is_dont_delete) { + __ IncrementCounter(&Counters::dont_delete_hint_hit, 1, + frame_->scratch0(), frame_->scratch1()); + } + } + + { Assembler::BlockConstPoolScope block_const_pool(masm_); + if (!is_contextual) { + // Check that the receiver is a heap object. + __ tst(receiver, Operand(kSmiTagMask)); + deferred->Branch(eq); + } + + // Check for the_hole_value if necessary. + // Below we rely on the number of instructions generated, and we can't + // cope with the Check macro which does not generate a fixed number of + // instructions. 
+ Label skip, check_the_hole, cont; + if (FLAG_debug_code && is_contextual && is_dont_delete) { + __ b(&skip); + __ bind(&check_the_hole); + __ Check(ne, "DontDelete cells can't contain the hole"); + __ b(&cont); + __ bind(&skip); + } + +#ifdef DEBUG + int InlinedNamedLoadInstructions = 5; + Label check_inlined_codesize; + masm_->bind(&check_inlined_codesize); +#endif + + Register scratch = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + + // Check the map. The null map used below is patched by the inline cache + // code. Therefore we can't use a LoadRoot call. + __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ mov(scratch2, Operand(Factory::null_value())); + __ cmp(scratch, scratch2); + deferred->Branch(ne); + + if (is_contextual) { +#ifdef DEBUG + InlinedNamedLoadInstructions += 1; +#endif + // Load the (initially invalid) cell and get its value. + masm()->mov(receiver, Operand(Factory::null_value())); + __ ldr(receiver, + FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset)); + + deferred->set_is_dont_delete(is_dont_delete); + + if (!is_dont_delete) { +#ifdef DEBUG + InlinedNamedLoadInstructions += 3; +#endif + __ cmp(receiver, Operand(Factory::the_hole_value())); + deferred->Branch(eq); + } else if (FLAG_debug_code) { +#ifdef DEBUG + InlinedNamedLoadInstructions += 3; +#endif + __ cmp(receiver, Operand(Factory::the_hole_value())); + __ b(&check_the_hole, eq); + __ bind(&cont); + } + } else { + // Initially use an invalid index. The index will be patched by the + // inline cache code. + __ ldr(receiver, MemOperand(receiver, 0)); + } + + // Make sure that the expected number of instructions are generated. + // If the code before is updated, the offsets in ic-arm.cc + // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need + // to be updated. + ASSERT_EQ(InlinedNamedLoadInstructions, + masm_->InstructionsGeneratedSince(&check_inlined_codesize)); + } + + deferred->BindExit(); + // At this point the receiver register has the result, either from the + // deferred code or from the inlined code. + frame_->EmitPush(receiver); + } +} + + +void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) { +#ifdef DEBUG + int expected_height = frame()->height() - (is_contextual ? 1 : 2); +#endif + + Result result; + if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { + frame()->CallStoreIC(name, is_contextual, strict_mode_flag()); + } else { + // Inline the in-object property case. + JumpTarget slow, done; + + // Get the value and receiver from the stack. + frame()->PopToR0(); + Register value = r0; + frame()->PopToR1(); + Register receiver = r1; + + DeferredReferenceSetNamedValue* deferred = + new DeferredReferenceSetNamedValue( + value, receiver, name, strict_mode_flag()); + + // Check that the receiver is a heap object. + __ tst(receiver, Operand(kSmiTagMask)); + deferred->Branch(eq); + + // The following instructions are the part of the inlined + // in-object property store code which can be patched. Therefore + // the exact number of instructions generated must be fixed, so + // the constant pool is blocked while generating this code. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + Register scratch0 = VirtualFrame::scratch0(); + Register scratch1 = VirtualFrame::scratch1(); + + // Check the map. Initially use an invalid map to force a + // failure. The map check will be patched in the runtime system. 
+ __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); + +#ifdef DEBUG + Label check_inlined_codesize; + masm_->bind(&check_inlined_codesize); +#endif + __ mov(scratch0, Operand(Factory::null_value())); + __ cmp(scratch0, scratch1); + deferred->Branch(ne); + + int offset = 0; + __ str(value, MemOperand(receiver, offset)); + + // Update the write barrier and record its size. We do not use + // the RecordWrite macro here because we want the offset + // addition instruction first to make it easy to patch. + Label record_write_start, record_write_done; + __ bind(&record_write_start); + // Add offset into the object. + __ add(scratch0, receiver, Operand(offset)); + // Test that the object is not in the new space. We cannot set + // region marks for new space pages. + __ InNewSpace(receiver, scratch1, eq, &record_write_done); + // Record the actual write. + __ RecordWriteHelper(receiver, scratch0, scratch1); + __ bind(&record_write_done); + // Clobber all input registers when running with the debug-code flag + // turned on to provoke errors. + if (FLAG_debug_code) { + __ mov(receiver, Operand(BitCast<int32_t>(kZapValue))); + __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue))); + __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue))); + } + // Check that this is the first inlined write barrier or that + // this inlined write barrier has the same size as all the other + // inlined write barriers. + ASSERT((inlined_write_barrier_size_ == -1) || + (inlined_write_barrier_size_ == + masm()->InstructionsGeneratedSince(&record_write_start))); + inlined_write_barrier_size_ = + masm()->InstructionsGeneratedSince(&record_write_start); + + // Make sure that the expected number of instructions are generated. + ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(), + masm()->InstructionsGeneratedSince(&check_inlined_codesize)); + } + deferred->BindExit(); + } + ASSERT_EQ(expected_height, frame()->height()); +} + + +void CodeGenerator::EmitKeyedLoad() { + if (loop_nesting() == 0) { + Comment cmnt(masm_, "[ Load from keyed property"); + frame_->CallKeyedLoadIC(); + } else { + // Inline the keyed load. + Comment cmnt(masm_, "[ Inlined load from keyed property"); + + // Counter will be decremented in the deferred code. Placed here to avoid + // having it in the instruction stream below where patching will occur. + __ IncrementCounter(&Counters::keyed_load_inline, 1, + frame_->scratch0(), frame_->scratch1()); + + // Load the key and receiver from the stack. + bool key_is_known_smi = frame_->KnownSmiAt(0); + Register key = frame_->PopToRegister(); + Register receiver = frame_->PopToRegister(key); + + // The deferred code expects key and receiver in registers. + DeferredReferenceGetKeyedValue* deferred = + new DeferredReferenceGetKeyedValue(key, receiver); + + // Check that the receiver is a heap object. + __ tst(receiver, Operand(kSmiTagMask)); + deferred->Branch(eq); + + // The following instructions are the part of the inlined load keyed + // property code which can be patched. Therefore the exact number of + // instructions generated need to be fixed, so the constant pool is blocked + // while generating this code. + { Assembler::BlockConstPoolScope block_const_pool(masm_); + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + // Check the map. The null map used below is patched by the inline cache + // code. + __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); + + // Check that the key is a smi. 
+ if (!key_is_known_smi) { + __ tst(key, Operand(kSmiTagMask)); + deferred->Branch(ne); + } + +#ifdef DEBUG + Label check_inlined_codesize; + masm_->bind(&check_inlined_codesize); +#endif + __ mov(scratch2, Operand(Factory::null_value())); + __ cmp(scratch1, scratch2); + deferred->Branch(ne); + + // Get the elements array from the receiver. + __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ AssertFastElements(scratch1); + + // Check that key is within bounds. Use unsigned comparison to handle + // negative keys. + __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); + __ cmp(scratch2, key); + deferred->Branch(ls); // Unsigned less equal. + + // Load and check that the result is not the hole (key is a smi). + __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); + __ add(scratch1, + scratch1, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(scratch1, + MemOperand(scratch1, key, LSL, + kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); + __ cmp(scratch1, scratch2); + deferred->Branch(eq); + + __ mov(r0, scratch1); + // Make sure that the expected number of instructions are generated. + ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(), + masm_->InstructionsGeneratedSince(&check_inlined_codesize)); + } + + deferred->BindExit(); + } +} + + +void CodeGenerator::EmitKeyedStore(StaticType* key_type, + WriteBarrierCharacter wb_info) { + // Generate inlined version of the keyed store if the code is in a loop + // and the key is likely to be a smi. + if (loop_nesting() > 0 && key_type->IsLikelySmi()) { + // Inline the keyed store. + Comment cmnt(masm_, "[ Inlined store to keyed property"); + + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + Register scratch3 = r3; + + // Counter will be decremented in the deferred code. Placed here to avoid + // having it in the instruction stream below where patching will occur. + __ IncrementCounter(&Counters::keyed_store_inline, 1, + scratch1, scratch2); + + + + // Load the value, key and receiver from the stack. + bool value_is_harmless = frame_->KnownSmiAt(0); + if (wb_info == NEVER_NEWSPACE) value_is_harmless = true; + bool key_is_smi = frame_->KnownSmiAt(1); + Register value = frame_->PopToRegister(); + Register key = frame_->PopToRegister(value); + VirtualFrame::SpilledScope spilled(frame_); + Register receiver = r2; + frame_->EmitPop(receiver); + +#ifdef DEBUG + bool we_remembered_the_write_barrier = value_is_harmless; +#endif + + // The deferred code expects value, key and receiver in registers. + DeferredReferenceSetKeyedValue* deferred = + new DeferredReferenceSetKeyedValue( + value, key, receiver, strict_mode_flag()); + + // Check that the value is a smi. As this inlined code does not set the + // write barrier it is only possible to store smi values. + if (!value_is_harmless) { + // If the value is not likely to be a Smi then let's test the fixed array + // for new space instead. See below. + if (wb_info == LIKELY_SMI) { + __ tst(value, Operand(kSmiTagMask)); + deferred->Branch(ne); +#ifdef DEBUG + we_remembered_the_write_barrier = true; +#endif + } + } + + if (!key_is_smi) { + // Check that the key is a smi. + __ tst(key, Operand(kSmiTagMask)); + deferred->Branch(ne); + } + + // Check that the receiver is a heap object. + __ tst(receiver, Operand(kSmiTagMask)); + deferred->Branch(eq); + + // Check that the receiver is a JSArray. 
+ __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE); + deferred->Branch(ne); + + // Check that the key is within bounds. Both the key and the length of + // the JSArray are smis. Use unsigned comparison to handle negative keys. + __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ cmp(scratch1, key); + deferred->Branch(ls); // Unsigned less equal. + + // Get the elements array from the receiver. + __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); + if (!value_is_harmless && wb_info != LIKELY_SMI) { + Label ok; + __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask())); + __ cmp(scratch2, Operand(ExternalReference::new_space_start())); + __ tst(value, Operand(kSmiTagMask), ne); + deferred->Branch(ne); +#ifdef DEBUG + we_remembered_the_write_barrier = true; +#endif + } + // Check that the elements array is not a dictionary. + __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); + // The following instructions are the part of the inlined store keyed + // property code which can be patched. Therefore the exact number of + // instructions generated need to be fixed, so the constant pool is blocked + // while generating this code. + { Assembler::BlockConstPoolScope block_const_pool(masm_); +#ifdef DEBUG + Label check_inlined_codesize; + masm_->bind(&check_inlined_codesize); +#endif + + // Read the fixed array map from the constant pool (not from the root + // array) so that the value can be patched. When debugging, we patch this + // comparison to always fail so that we will hit the IC call in the + // deferred code which will allow the debugger to break for fast case + // stores. + __ mov(scratch3, Operand(Factory::fixed_array_map())); + __ cmp(scratch2, scratch3); + deferred->Branch(ne); + + // Store the value. + __ add(scratch1, scratch1, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ str(value, + MemOperand(scratch1, key, LSL, + kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); + + // Make sure that the expected number of instructions are generated. + ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch, + masm_->InstructionsGeneratedSince(&check_inlined_codesize)); + } + + ASSERT(we_remembered_the_write_barrier); + + // Make sure that r0 holds the value which is the result of the expression. + __ Move(r0, value); + + deferred->BindExit(); + } else { + frame()->CallKeyedStoreIC(strict_mode_flag()); + } +} + + +#ifdef DEBUG +bool CodeGenerator::HasValidEntryRegisters() { return true; } +#endif + + +#undef __ +#define __ ACCESS_MASM(masm) + +Handle<String> Reference::GetName() { + ASSERT(type_ == NAMED); + Property* property = expression_->AsProperty(); + if (property == NULL) { + // Global variable reference treated as a named property reference. + VariableProxy* proxy = expression_->AsVariableProxy(); + ASSERT(proxy->AsVariable() != NULL); + ASSERT(proxy->AsVariable()->is_global()); + return proxy->name(); + } else { + Literal* raw_name = property->key()->AsLiteral(); + ASSERT(raw_name != NULL); + return Handle<String>(String::cast(*raw_name->handle())); + } +} + + +void Reference::DupIfPersist() { + if (persist_after_get_) { + switch (type_) { + case KEYED: + cgen_->frame()->Dup2(); + break; + case NAMED: + cgen_->frame()->Dup(); + // Fall through. + case UNLOADED: + case ILLEGAL: + case SLOT: + // Do nothing. 
+ ; + } + } else { + set_unloaded(); + } +} + + +void Reference::GetValue() { + ASSERT(cgen_->HasValidEntryRegisters()); + ASSERT(!is_illegal()); + ASSERT(!cgen_->has_cc()); + MacroAssembler* masm = cgen_->masm(); + Property* property = expression_->AsProperty(); + if (property != NULL) { + cgen_->CodeForSourcePosition(property->position()); + } + + switch (type_) { + case SLOT: { + Comment cmnt(masm, "[ Load from Slot"); + Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot(); + ASSERT(slot != NULL); + DupIfPersist(); + cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); + break; + } + + case NAMED: { + Variable* var = expression_->AsVariableProxy()->AsVariable(); + bool is_global = var != NULL; + ASSERT(!is_global || var->is_global()); + Handle<String> name = GetName(); + DupIfPersist(); + cgen_->EmitNamedLoad(name, is_global); + break; + } + + case KEYED: { + ASSERT(property != NULL); + DupIfPersist(); + cgen_->EmitKeyedLoad(); + cgen_->frame()->EmitPush(r0); + break; + } + + default: + UNREACHABLE(); + } +} + + +void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) { + ASSERT(!is_illegal()); + ASSERT(!cgen_->has_cc()); + MacroAssembler* masm = cgen_->masm(); + VirtualFrame* frame = cgen_->frame(); + Property* property = expression_->AsProperty(); + if (property != NULL) { + cgen_->CodeForSourcePosition(property->position()); + } + + switch (type_) { + case SLOT: { + Comment cmnt(masm, "[ Store to Slot"); + Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot(); + cgen_->StoreToSlot(slot, init_state); + set_unloaded(); + break; + } + + case NAMED: { + Comment cmnt(masm, "[ Store to named Property"); + cgen_->EmitNamedStore(GetName(), false); + frame->EmitPush(r0); + set_unloaded(); + break; + } + + case KEYED: { + Comment cmnt(masm, "[ Store to keyed Property"); + Property* property = expression_->AsProperty(); + ASSERT(property != NULL); + cgen_->CodeForSourcePosition(property->position()); + cgen_->EmitKeyedStore(property->key()->type(), wb_info); + frame->EmitPush(r0); + set_unloaded(); + break; + } + + default: + UNREACHABLE(); + } +} + + +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int len = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(len); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + + OS::SNPrintF(Vector<char>(name_, len), + "GenericBinaryOpStub_%s_%s%s_%s", + op_name, + overwrite_name, + specialized_on_rhs_ ? "_ConstantRhs" : "", + BinaryOpIC::GetName(runtime_operands_type_)); + return name_; +} + + +#undef __ + } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 01aa8052e1..8f46256b8a 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -37,8 +37,162 @@ namespace internal { // Forward declarations class CompilationInfo; +class DeferredCode; +class JumpTarget; +class RegisterAllocator; +class RegisterFile; +enum InitState { CONST_INIT, NOT_CONST_INIT }; enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; +enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI }; +enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE }; + + +// ------------------------------------------------------------------------- +// Reference support + +// A reference is a C++ stack-allocated object that puts a +// reference on the virtual frame. The reference may be consumed +// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference. +// When the lifetime (scope) of a valid reference ends, it must have +// been consumed, and be in state UNLOADED. +class Reference BASE_EMBEDDED { + public: + // The values of the types is important, see size(). + enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 }; + Reference(CodeGenerator* cgen, + Expression* expression, + bool persist_after_get = false); + ~Reference(); + + Expression* expression() const { return expression_; } + Type type() const { return type_; } + void set_type(Type value) { + ASSERT_EQ(ILLEGAL, type_); + type_ = value; + } + + void set_unloaded() { + ASSERT_NE(ILLEGAL, type_); + ASSERT_NE(UNLOADED, type_); + type_ = UNLOADED; + } + // The size the reference takes up on the stack. + int size() const { + return (type_ < SLOT) ? 0 : type_; + } + + bool is_illegal() const { return type_ == ILLEGAL; } + bool is_slot() const { return type_ == SLOT; } + bool is_property() const { return type_ == NAMED || type_ == KEYED; } + bool is_unloaded() const { return type_ == UNLOADED; } + + // Return the name. Only valid for named property references. + Handle<String> GetName(); + + // Generate code to push the value of the reference on top of the + // expression stack. The reference is expected to be already on top of + // the expression stack, and it is consumed by the call unless the + // reference is for a compound assignment. + // If the reference is not consumed, it is left in place under its value. + void GetValue(); + + // Generate code to store the value on top of the expression stack in the + // reference. The reference is expected to be immediately below the value + // on the expression stack. The value is stored in the location specified + // by the reference, and is left on top of the stack, after the reference + // is popped from beneath it (unloaded). + void SetValue(InitState init_state, WriteBarrierCharacter wb); + + // This is in preparation for something that uses the reference on the stack. + // If we need this reference afterwards get then dup it now. Otherwise mark + // it as used. + inline void DupIfPersist(); + + private: + CodeGenerator* cgen_; + Expression* expression_; + Type type_; + // Keep the reference on the stack after get, so it can be used by set later. + bool persist_after_get_; +}; + + +// ------------------------------------------------------------------------- +// Code generation state + +// The state is passed down the AST by the code generator (and back up, in +// the form of the state of the label pair). It is threaded through the +// call stack. 
Constructing a state implicitly pushes it on the owning code +// generator's stack of states, and destroying one implicitly pops it. + +class CodeGenState BASE_EMBEDDED { + public: + // Create an initial code generator state. Destroying the initial state + // leaves the code generator with a NULL state. + explicit CodeGenState(CodeGenerator* owner); + + // Destroy a code generator state and restore the owning code generator's + // previous state. + virtual ~CodeGenState(); + + virtual JumpTarget* true_target() const { return NULL; } + virtual JumpTarget* false_target() const { return NULL; } + + protected: + inline CodeGenerator* owner() { return owner_; } + inline CodeGenState* previous() const { return previous_; } + + private: + CodeGenerator* owner_; + CodeGenState* previous_; +}; + + +class ConditionCodeGenState : public CodeGenState { + public: + // Create a code generator state based on a code generator's current + // state. The new state has its own pair of branch labels. + ConditionCodeGenState(CodeGenerator* owner, + JumpTarget* true_target, + JumpTarget* false_target); + + virtual JumpTarget* true_target() const { return true_target_; } + virtual JumpTarget* false_target() const { return false_target_; } + + private: + JumpTarget* true_target_; + JumpTarget* false_target_; +}; + + +class TypeInfoCodeGenState : public CodeGenState { + public: + TypeInfoCodeGenState(CodeGenerator* owner, + Slot* slot_number, + TypeInfo info); + ~TypeInfoCodeGenState(); + + virtual JumpTarget* true_target() const { return previous()->true_target(); } + virtual JumpTarget* false_target() const { + return previous()->false_target(); + } + + private: + Slot* slot_; + TypeInfo old_type_info_; +}; + + +// ------------------------------------------------------------------------- +// Arguments allocation mode + +enum ArgumentsAllocationMode { + NO_ARGUMENTS_ALLOCATION, + EAGER_ARGUMENTS_ALLOCATION, + LAZY_ARGUMENTS_ALLOCATION +}; + // ------------------------------------------------------------------------- // CodeGenerator @@ -71,17 +225,367 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + // Accessors + MacroAssembler* masm() { return masm_; } + VirtualFrame* frame() const { return frame_; } + inline Handle<Script> script(); + + bool has_valid_frame() const { return frame_ != NULL; } + + // Set the virtual frame to be new_frame, with non-frame register + // reference counts given by non_frame_registers. The non-frame + // register reference counts of the old frame are returned in + // non_frame_registers. + void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers); + + void DeleteFrame(); + + RegisterAllocator* allocator() const { return allocator_; } + + CodeGenState* state() { return state_; } + void set_state(CodeGenState* state) { state_ = state; } + + TypeInfo type_info(Slot* slot) { + int index = NumberOfSlot(slot); + if (index == kInvalidSlotNumber) return TypeInfo::Unknown(); + return (*type_info_)[index]; + } + + TypeInfo set_type_info(Slot* slot, TypeInfo info) { + int index = NumberOfSlot(slot); + ASSERT(index >= kInvalidSlotNumber); + if (index != kInvalidSlotNumber) { + TypeInfo previous_value = (*type_info_)[index]; + (*type_info_)[index] = info; + return previous_value; + } + return TypeInfo::Unknown(); + } + + void AddDeferred(DeferredCode* code) { deferred_.Add(code); } + // Constants related to patching of inlined load/store. static int GetInlinedKeyedLoadInstructionsAfterPatch() { return FLAG_debug_code ? 
32 : 13; } - static const int kInlinedKeyedStoreInstructionsAfterPatch = 8; + static const int kInlinedKeyedStoreInstructionsAfterPatch = 5; static int GetInlinedNamedStoreInstructionsAfterPatch() { - ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1); - return Isolate::Current()->inlined_write_barrier_size() + 4; + ASSERT(inlined_write_barrier_size_ != -1); + return inlined_write_barrier_size_ + 4; } private: + // Type of a member function that generates inline code for a native function. + typedef void (CodeGenerator::*InlineFunctionGenerator) + (ZoneList<Expression*>*); + + static const InlineFunctionGenerator kInlineFunctionGenerators[]; + + // Construction/Destruction + explicit CodeGenerator(MacroAssembler* masm); + + // Accessors + inline bool is_eval(); + inline Scope* scope(); + inline StrictModeFlag strict_mode_flag(); + + // Generating deferred code. + void ProcessDeferred(); + + static const int kInvalidSlotNumber = -1; + + int NumberOfSlot(Slot* slot); + + // State + bool has_cc() const { return cc_reg_ != al; } + JumpTarget* true_target() const { return state_->true_target(); } + JumpTarget* false_target() const { return state_->false_target(); } + + // Track loop nesting level. + int loop_nesting() const { return loop_nesting_; } + void IncrementLoopNesting() { loop_nesting_++; } + void DecrementLoopNesting() { loop_nesting_--; } + + // Node visitors. + void VisitStatements(ZoneList<Statement*>* statements); + + virtual void VisitSlot(Slot* node); +#define DEF_VISIT(type) \ + virtual void Visit##type(type* node); + AST_NODE_LIST(DEF_VISIT) +#undef DEF_VISIT + + // Main code generation function + void Generate(CompilationInfo* info); + + // Generate the return sequence code. Should be called no more than + // once per compiled function, immediately after binding the return + // target (which can not be done more than once). The return value should + // be in r0. + void GenerateReturnSequence(); + + // Returns the arguments allocation mode. + ArgumentsAllocationMode ArgumentsMode(); + + // Store the arguments object and allocate it if necessary. + void StoreArgumentsObject(bool initial); + + // The following are used by class Reference. + void LoadReference(Reference* ref); + void UnloadReference(Reference* ref); + + MemOperand SlotOperand(Slot* slot, Register tmp); + + MemOperand ContextSlotOperandCheckExtensions(Slot* slot, + Register tmp, + Register tmp2, + JumpTarget* slow); + + // Expressions + void LoadCondition(Expression* x, + JumpTarget* true_target, + JumpTarget* false_target, + bool force_cc); + void Load(Expression* expr); + void LoadGlobal(); + void LoadGlobalReceiver(Register scratch); + + // Read a value from a slot and leave it on top of the expression stack. + void LoadFromSlot(Slot* slot, TypeofState typeof_state); + void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state); + + // Store the value on top of the stack to a slot. + void StoreToSlot(Slot* slot, InitState init_state); + + // Support for compiling assignment expressions. + void EmitSlotAssignment(Assignment* node); + void EmitNamedPropertyAssignment(Assignment* node); + void EmitKeyedPropertyAssignment(Assignment* node); + + // Load a named property, returning it in r0. The receiver is passed on the + // stack, and remains there. + void EmitNamedLoad(Handle<String> name, bool is_contextual); + + // Store to a named property. If the store is contextual, value is passed on + // the frame and consumed. Otherwise, receiver and value are passed on the + // frame and consumed. 
The result is returned in r0. + void EmitNamedStore(Handle<String> name, bool is_contextual); + + // Load a keyed property, leaving it in r0. The receiver and key are + // passed on the stack, and remain there. + void EmitKeyedLoad(); + + // Store a keyed property. Key and receiver are on the stack and the value is + // in r0. Result is returned in r0. + void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info); + + void LoadFromGlobalSlotCheckExtensions(Slot* slot, + TypeofState typeof_state, + JumpTarget* slow); + + // Support for loading from local/global variables and arguments + // whose location is known unless they are shadowed by + // eval-introduced bindings. Generates no code for unsupported slot + // types and therefore expects to fall through to the slow jump target. + void EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + JumpTarget* slow, + JumpTarget* done); + + // Special code for typeof expressions: Unfortunately, we must + // be careful when loading the expression in 'typeof' + // expressions. We are not allowed to throw reference errors for + // non-existing properties of the global object, so we must make it + // look like an explicit property access, instead of an access + // through the context chain. + void LoadTypeofExpression(Expression* x); + + void ToBoolean(JumpTarget* true_target, JumpTarget* false_target); + + // Generate code that computes a shortcutting logical operation. + void GenerateLogicalBooleanOperation(BinaryOperation* node); + + void GenericBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + GenerateInlineSmi inline_smi, + int known_rhs = + GenericBinaryOpStub::kUnknownIntValue); + void Comparison(Condition cc, + Expression* left, + Expression* right, + bool strict = false); + + void SmiOperation(Token::Value op, + Handle<Object> value, + bool reversed, + OverwriteMode mode); + + void CallWithArguments(ZoneList<Expression*>* arguments, + CallFunctionFlags flags, + int position); + + // An optimized implementation of expressions of the form + // x.apply(y, arguments). We call x the applicand and y the receiver. + // The optimization avoids allocating an arguments object if possible. + void CallApplyLazy(Expression* applicand, + Expression* receiver, + VariableProxy* arguments, + int position); + + // Control flow + void Branch(bool if_true, JumpTarget* target); + void CheckStack(); + + bool CheckForInlineRuntimeCall(CallRuntime* node); + + static Handle<Code> ComputeLazyCompile(int argc); + void ProcessDeclarations(ZoneList<Declaration*>* declarations); + + // Declare global variables and functions in the given array of + // name/value pairs. + void DeclareGlobals(Handle<FixedArray> pairs); + + // Instantiate the function based on the shared function info. + void InstantiateFunction(Handle<SharedFunctionInfo> function_info, + bool pretenure); + + // Support for type checks. + void GenerateIsSmi(ZoneList<Expression*>* args); + void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args); + void GenerateIsArray(ZoneList<Expression*>* args); + void GenerateIsRegExp(ZoneList<Expression*>* args); + void GenerateIsObject(ZoneList<Expression*>* args); + void GenerateIsSpecObject(ZoneList<Expression*>* args); + void GenerateIsFunction(ZoneList<Expression*>* args); + void GenerateIsUndetectableObject(ZoneList<Expression*>* args); + void GenerateIsStringWrapperSafeForDefaultValueOf( + ZoneList<Expression*>* args); + + // Support for construct call checks. 
+ void GenerateIsConstructCall(ZoneList<Expression*>* args); + + // Support for arguments.length and arguments[?]. + void GenerateArgumentsLength(ZoneList<Expression*>* args); + void GenerateArguments(ZoneList<Expression*>* args); + + // Support for accessing the class and value fields of an object. + void GenerateClassOf(ZoneList<Expression*>* args); + void GenerateValueOf(ZoneList<Expression*>* args); + void GenerateSetValueOf(ZoneList<Expression*>* args); + + // Fast support for charCodeAt(n). + void GenerateStringCharCodeAt(ZoneList<Expression*>* args); + + // Fast support for string.charAt(n) and string[n]. + void GenerateStringCharFromCode(ZoneList<Expression*>* args); + + // Fast support for string.charAt(n) and string[n]. + void GenerateStringCharAt(ZoneList<Expression*>* args); + + // Fast support for object equality testing. + void GenerateObjectEquals(ZoneList<Expression*>* args); + + void GenerateLog(ZoneList<Expression*>* args); + + // Fast support for Math.random(). + void GenerateRandomHeapNumber(ZoneList<Expression*>* args); + + // Fast support for StringAdd. + void GenerateStringAdd(ZoneList<Expression*>* args); + + // Fast support for SubString. + void GenerateSubString(ZoneList<Expression*>* args); + + // Fast support for StringCompare. + void GenerateStringCompare(ZoneList<Expression*>* args); + + // Support for direct calls from JavaScript to native RegExp code. + void GenerateRegExpExec(ZoneList<Expression*>* args); + + void GenerateRegExpConstructResult(ZoneList<Expression*>* args); + + // Support for fast native caches. + void GenerateGetFromCache(ZoneList<Expression*>* args); + + // Fast support for number to string. + void GenerateNumberToString(ZoneList<Expression*>* args); + + // Fast swapping of elements. + void GenerateSwapElements(ZoneList<Expression*>* args); + + // Fast call for custom callbacks. + void GenerateCallFunction(ZoneList<Expression*>* args); + + // Fast call to math functions. + void GenerateMathPow(ZoneList<Expression*>* args); + void GenerateMathSin(ZoneList<Expression*>* args); + void GenerateMathCos(ZoneList<Expression*>* args); + void GenerateMathSqrt(ZoneList<Expression*>* args); + void GenerateMathLog(ZoneList<Expression*>* args); + + void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args); + + void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args); + void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args); + void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args); + + // Simple condition analysis. + enum ConditionAnalysis { + ALWAYS_TRUE, + ALWAYS_FALSE, + DONT_KNOW + }; + ConditionAnalysis AnalyzeCondition(Expression* cond); + + // Methods used to indicate which source code is generated for. Source + // positions are collected by the assembler and emitted with the relocation + // information. + void CodeForFunctionPosition(FunctionLiteral* fun); + void CodeForReturnPosition(FunctionLiteral* fun); + void CodeForStatementPosition(Statement* node); + void CodeForDoWhileConditionPosition(DoWhileStatement* stmt); + void CodeForSourcePosition(int pos); + +#ifdef DEBUG + // True if the registers are valid for entry to a block. 
+ bool HasValidEntryRegisters(); +#endif + + List<DeferredCode*> deferred_; + + // Assembler + MacroAssembler* masm_; // to generate code + + CompilationInfo* info_; + + // Code generation state + VirtualFrame* frame_; + RegisterAllocator* allocator_; + Condition cc_reg_; + CodeGenState* state_; + int loop_nesting_; + + Vector<TypeInfo>* type_info_; + + // Jump targets + BreakTarget function_return_; + + // True if the function return is shadowed (ie, jumping to the target + // function_return_ does not jump to the true function return, but rather + // to some unlinking code). + bool function_return_is_shadowed_; + + // Size of inlined write barriers generated by EmitNamedStore. + static int inlined_write_barrier_size_; + + friend class VirtualFrame; + friend class JumpTarget; + friend class Reference; + friend class FastCodeGenerator; + friend class FullCodeGenerator; + friend class FullCodeGenSyntaxChecker; + friend class LCodeGen; + DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 823c6ff7e1..e6033a8977 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,9 +28,12 @@ #ifndef V8_ARM_CONSTANTS_ARM_H_ #define V8_ARM_CONSTANTS_ARM_H_ -// ARM EABI is required. -#if defined(__arm__) && !defined(__ARM_EABI__) -#error ARM EABI support is required. +// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we +// are not running on real ARM hardware. One reason for this is that the +// old ABI uses fp registers in the calling convention and the simulator does +// not simulate fp registers or coroutine instructions. +#if defined(__ARM_EABI__) || !defined(__arm__) +# define USE_ARM_EABI 1 #endif // This means that interwork-compatible jump instructions are generated. We @@ -86,11 +89,6 @@ namespace v8 { namespace internal { -// Constant pool marker. -static const int kConstantPoolMarkerMask = 0xffe00000; -static const int kConstantPoolMarker = 0x0c000000; -static const int kConstantPoolLengthMask = 0x001ffff; - // Number of registers in normal ARM mode. static const int kNumRegisters = 16; @@ -343,9 +341,7 @@ enum BlockAddrMode { da_x = (0|0|0) << 21, // Decrement after. ia_x = (0|4|0) << 21, // Increment after. db_x = (8|0|0) << 21, // Decrement before. - ib_x = (8|4|0) << 21, // Increment before. - - kBlockAddrModeMask = (8|4|1) << 21 + ib_x = (8|4|0) << 21 // Increment before. }; @@ -392,11 +388,9 @@ enum VFPConversionMode { // This mask does not include the "inexact" or "input denormal" cumulative // exceptions flags, because we usually don't want to check for it. 
static const uint32_t kVFPExceptionMask = 0xf; -static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0; -static const uint32_t kVFPOverflowExceptionBit = 1 << 2; -static const uint32_t kVFPUnderflowExceptionBit = 1 << 3; static const uint32_t kVFPInexactExceptionBit = 1 << 4; static const uint32_t kVFPFlushToZeroMask = 1 << 24; +static const uint32_t kVFPInvalidExceptionBit = 1; static const uint32_t kVFPNConditionFlagBit = 1 << 31; static const uint32_t kVFPZConditionFlagBit = 1 << 30; diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index 51cfeb6c87..51c84b3354 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -42,12 +42,10 @@ namespace v8 { namespace internal { void CPU::Setup() { - CpuFeatures::Probe(); -} - - -bool CPU::SupportsCrankshaft() { - return CpuFeatures::IsSupported(VFP3); + CpuFeatures::Probe(true); + if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) { + V8::DisableCrankshaft(); + } } @@ -63,7 +61,7 @@ void CPU::FlushICache(void* start, size_t size) { // that the Icache was flushed. // None of this code ends up in the snapshot so there are no issues // around whether or not to generate the code when building snapshots. - Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size); + Simulator::FlushICache(start, size); #else // Ideally, we would call // syscall(__ARM_NR_cacheflush, start, @@ -75,33 +73,62 @@ void CPU::FlushICache(void* start, size_t size) { register uint32_t end asm("a2") = reinterpret_cast<uint32_t>(start) + size; register uint32_t flg asm("a3") = 0; - #if defined (__arm__) && !defined(__thumb__) - // __arm__ may be defined in thumb mode. - register uint32_t scno asm("r7") = __ARM_NR_cacheflush; - asm volatile( - "svc 0x0" - : "=r" (beg) - : "0" (beg), "r" (end), "r" (flg), "r" (scno)); + #ifdef __ARM_EABI__ + #if defined (__arm__) && !defined(__thumb__) + // __arm__ may be defined in thumb mode. + register uint32_t scno asm("r7") = __ARM_NR_cacheflush; + asm volatile( + "svc 0x0" + : "=r" (beg) + : "0" (beg), "r" (end), "r" (flg), "r" (scno)); + #else + // r7 is reserved by the EABI in thumb mode. + asm volatile( + "@ Enter ARM Mode \n\t" + "adr r3, 1f \n\t" + "bx r3 \n\t" + ".ALIGN 4 \n\t" + ".ARM \n" + "1: push {r7} \n\t" + "mov r7, %4 \n\t" + "svc 0x0 \n\t" + "pop {r7} \n\t" + "@ Enter THUMB Mode\n\t" + "adr r3, 2f+1 \n\t" + "bx r3 \n\t" + ".THUMB \n" + "2: \n\t" + : "=r" (beg) + : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush) + : "r3"); + #endif #else - // r7 is reserved by the EABI in thumb mode. - asm volatile( - "@ Enter ARM Mode \n\t" - "adr r3, 1f \n\t" - "bx r3 \n\t" - ".ALIGN 4 \n\t" - ".ARM \n" - "1: push {r7} \n\t" - "mov r7, %4 \n\t" - "svc 0x0 \n\t" - "pop {r7} \n\t" - "@ Enter THUMB Mode\n\t" - "adr r3, 2f+1 \n\t" - "bx r3 \n\t" - ".THUMB \n" - "2: \n\t" - : "=r" (beg) - : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush) - : "r3"); + #if defined (__arm__) && !defined(__thumb__) + // __arm__ may be defined in thumb mode. 
+ asm volatile( + "svc %1" + : "=r" (beg) + : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg)); + #else + // Do not use the value of __ARM_NR_cacheflush in the inline assembly + // below, because the thumb mode value would be used, which would be + // wrong, since we switch to ARM mode before executing the svc instruction + asm volatile( + "@ Enter ARM Mode \n\t" + "adr r3, 1f \n\t" + "bx r3 \n\t" + ".ALIGN 4 \n\t" + ".ARM \n" + "1: svc 0x9f0002 \n" + "@ Enter THUMB Mode\n\t" + "adr r3, 2f+1 \n\t" + "bx r3 \n\t" + ".THUMB \n" + "2: \n\t" + : "=r" (beg) + : "0" (beg), "r" (end), "r" (flg) + : "r3"); + #endif #endif #endif } diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 07a22722c8..22640ca1c5 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2006-2008 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -29,7 +29,7 @@ #if defined(V8_TARGET_ARCH_ARM) -#include "codegen.h" +#include "codegen-inl.h" #include "debug.h" namespace v8 { @@ -65,7 +65,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() { patcher.masm()->mov(v8::internal::lr, v8::internal::pc); patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4)); #endif - patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry()); + patcher.Emit(Debug::debug_break_return()->entry()); patcher.masm()->bkpt(0); } @@ -115,7 +115,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() { patcher.masm()->mov(v8::internal::lr, v8::internal::pc); patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4)); #endif - patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry()); + patcher.Emit(Debug::debug_break_slot()->entry()); } @@ -159,7 +159,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments - __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); + __ mov(r1, Operand(ExternalReference::debug_break())); CEntryStub ceb(1); __ CallStub(&ceb); @@ -185,9 +185,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. 
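The inline assembly in the CPU::FlushICache hunks above hand-rolls the ARM Linux cacheflush system call, with extra care to switch out of Thumb mode and, under the EABI, to save and restore r7. As the code's own comment suggests, the same effect can be expressed through the generic syscall wrapper; a minimal sketch assuming an ARM Linux toolchain (helper name hypothetical, not part of this diff):

    #include <cstddef>
    #include <cstdint>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/unistd.h>   // provides __ARM_NR_cacheflush on ARM Linux

    // Hypothetical helper: flush the instruction cache for [start, start+size).
    // This is what the inline assembly above does by hand, minus the
    // ARM/Thumb mode switching and the r7 save/restore.
    static void FlushICacheRange(void* start, size_t size) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(start);
      uintptr_t end = begin + size;
      syscall(__ARM_NR_cacheflush, begin, end, 0 /* flags */);
    }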
- ExternalReference after_break_target = - ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate()); - __ mov(ip, Operand(after_break_target)); + __ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget()))); __ ldr(ip, MemOperand(ip)); __ Jump(ip); } diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index cd70e6de82..339841875a 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -51,7 +51,6 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { void Deoptimizer::DeoptimizeFunction(JSFunction* function) { - HandleScope scope; AssertNoAllocation no_allocation; if (!function->IsOptimized()) return; @@ -75,6 +74,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { int deoptimization_index = safepoint_entry.deoptimization_index(); int gap_code_size = safepoint_entry.gap_code_size(); // Check that we did not shoot past next safepoint. + // TODO(srdjan): How do we guarantee that safepoint code does not + // overlap other safepoint patching code? CHECK(pc_offset >= last_pc_offset); #ifdef DEBUG // Destroy the code which is not supposed to be run again. @@ -111,9 +112,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); - node->set_next(data->deoptimizing_code_list_); - data->deoptimizing_code_list_ = node; + node->set_next(deoptimizing_code_list_); + deoptimizing_code_list_ = node; // Set the code for the function to non-optimized version. function->ReplaceCode(function->shared()->code()); @@ -122,11 +122,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { PrintF("[forced deoptimization: "); function->PrintName(); PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); -#ifdef DEBUG - if (FLAG_print_code) { - code->PrintLn(); - } -#endif } } @@ -267,9 +262,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_ = new FrameDescription*[1]; output_[0] = new(output_frame_size) FrameDescription( output_frame_size, function_); -#ifdef DEBUG - output_[0]->SetKind(Code::OPTIMIZED_FUNCTION); -#endif // Clear the incoming parameters in the optimized frame to avoid // confusing the garbage collector. @@ -291,33 +283,14 @@ void Deoptimizer::DoComputeOsrOutputFrame() { // There are no translation commands for the caller's pc and fp, the // context, and the function. Set them up explicitly. 
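The comment above explains that the fixed part of a standard JavaScript frame (caller's pc, caller's fp, context, function marker) has no translation commands and is copied slot for slot from the unoptimized input frame to the OSR output frame, which is exactly what the loop in the next hunk does. A conceptual sketch of that copy with hypothetical frame types, just to make the word-by-word transfer explicit:

    #include <cstdint>

    // Hypothetical stand-in for the deoptimizer's FrameDescription, which
    // addresses slots by byte offsets.
    struct FrameSketch {
      uint32_t* slots;
      uint32_t Get(int offset) const { return slots[offset / 4]; }
      void Set(int offset, uint32_t v) { slots[offset / 4] = v; }
    };

    // Copy the four fixed slots (caller pc, caller fp, context, function)
    // from the input frame to the OSR output frame, one word at a time.
    static void CopyFixedFramePart(const FrameSketch& input, FrameSketch* output,
                                   int input_offset, int output_offset) {
      const int kPointerSize = 4;  // 32-bit ARM
      for (int i = 0; i < 4; i++) {
        output->Set(output_offset, input.Get(input_offset));
        input_offset -= kPointerSize;
        output_offset -= kPointerSize;
      }
    }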
- for (int i = StandardFrameConstants::kCallerPCOffset; - ok && i >= StandardFrameConstants::kMarkerOffset; - i -= kPointerSize) { + for (int i = 0; ok && i < 4; i++) { uint32_t input_value = input_->GetFrameSlot(input_offset); if (FLAG_trace_osr) { - const char* name = "UNKNOWN"; - switch (i) { - case StandardFrameConstants::kCallerPCOffset: - name = "caller's pc"; - break; - case StandardFrameConstants::kCallerFPOffset: - name = "fp"; - break; - case StandardFrameConstants::kContextOffset: - name = "context"; - break; - case StandardFrameConstants::kMarkerOffset: - name = "function"; - break; - } - PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n", + PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n", output_offset, input_value, - input_offset, - name); + input_offset); } - output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset)); input_offset -= kPointerSize; output_offset -= kPointerSize; @@ -343,7 +316,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { optimized_code_->entry() + pc_offset); output_[0]->SetPc(pc); } - Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR); + Code* continuation = Builtins::builtin(Builtins::NotifyOSR); output_[0]->SetContinuation( reinterpret_cast<uint32_t>(continuation->entry())); @@ -385,9 +358,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, // Allocate and store the output frame description. FrameDescription* output_frame = new(output_frame_size) FrameDescription(output_frame_size, function); -#ifdef DEBUG - output_frame->SetKind(Code::FUNCTION); -#endif bool is_bottommost = (0 == frame_index); bool is_topmost = (output_count_ - 1 == frame_index); @@ -520,13 +490,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, FullCodeGenerator::StateField::decode(pc_and_state); output_frame->SetState(Smi::FromInt(state)); - // Set the continuation for the topmost frame. - if (is_topmost && bailout_type_ != DEBUGGER) { - Builtins* builtins = isolate_->builtins(); + if (is_topmost) { Code* continuation = (bailout_type_ == EAGER) - ? builtins->builtin(Builtins::kNotifyDeoptimized) - : builtins->builtin(Builtins::kNotifyLazyDeoptimized); + ? Builtins::builtin(Builtins::NotifyDeoptimized) + : Builtins::builtin(Builtins::NotifyLazyDeoptimized); output_frame->SetContinuation( reinterpret_cast<uint32_t>(continuation->entry())); } @@ -535,36 +503,13 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, } -void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { - // Set the register values. The values are not important as there are no - // callee saved registers in JavaScript frames, so all registers are - // spilled. Registers fp and sp are set to the correct values though. - - for (int i = 0; i < Register::kNumRegisters; i++) { - input_->SetRegister(i, i * 4); - } - input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); - input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { - input_->SetDoubleRegister(i, 0.0); - } - - // Fill the frame content from the actual data on the frame. - for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { - input_->SetFrameSlot(i, Memory::uint32_at(tos + i)); - } -} - - #define __ masm()-> + // This code tries to be close to ia32 code so that any changes can be // easily ported. 
void Deoptimizer::EntryGenerator::Generate() { GeneratePrologue(); - - Isolate* isolate = masm()->isolate(); - CpuFeatures::Scope scope(VFP3); // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; @@ -575,21 +520,13 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumAllocatableRegisters; - // Save all VFP registers before messing with them. - DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0); - DwVfpRegister last = - DwVfpRegister::FromAllocationIndex( - DwVfpRegister::kNumAllocatableRegisters - 1); - ASSERT(last.code() > first.code()); - ASSERT((last.code() - first.code()) == - (DwVfpRegister::kNumAllocatableRegisters - 1)); -#ifdef DEBUG - for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) { - ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) && - (DwVfpRegister::FromAllocationIndex(i).code() >= first.code())); + // Save all general purpose registers before messing with them. + __ sub(sp, sp, Operand(kDoubleRegsSize)); + for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) { + DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i); + int offset = i * kDoubleSize; + __ vstr(vfp_reg, sp, offset); } -#endif - __ vstm(db_w, sp, first, last); // Push all 16 registers (needed to populate FrameDescription::registers_). __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); @@ -620,16 +557,14 @@ void Deoptimizer::EntryGenerator::Generate() { // Allocate a new deoptimizer object. // Pass four arguments in r0 to r3 and fifth argument on stack. - __ PrepareCallCFunction(6, r5); + __ PrepareCallCFunction(5, r5); __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ mov(r1, Operand(type())); // bailout type, // r2: bailout id already loaded. // r3: code address or 0 already loaded. __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. - __ mov(r5, Operand(ExternalReference::isolate_address())); - __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); // Preserve "deoptimizer" object in register r0 and get the input // frame descriptor pointer to r1 (deoptimizer->input_); @@ -683,8 +618,7 @@ void Deoptimizer::EntryGenerator::Generate() { // r0: deoptimizer object; r1: scratch. __ PrepareCallCFunction(1, r1); // Call Deoptimizer::ComputeOutputFrames(). - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. @@ -734,7 +668,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(ip); // remove lr // Set up the roots register. - ExternalReference roots_address = ExternalReference::roots_address(isolate); + ExternalReference roots_address = ExternalReference::roots_address(); __ mov(r10, Operand(roots_address)); __ pop(ip); // remove pc diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index d4bd81ce46..08f605b164 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -89,9 +89,6 @@ class Decoder { // Returns the length of the disassembled machine instruction in bytes. int InstructionDecode(byte* instruction); - static bool IsConstantPoolAt(byte* instr_ptr); - static int ConstantPoolSizeAt(byte* instr_ptr); - private: // Bottleneck functions to print into the out_buffer. void PrintChar(const char ch); @@ -371,34 +368,25 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) { int Decoder::FormatVFPRegister(Instruction* instr, const char* format) { ASSERT((format[0] == 'S') || (format[0] == 'D')); - VFPRegPrecision precision = - format[0] == 'D' ? kDoublePrecision : kSinglePrecision; - - int retval = 2; - int reg = -1; if (format[1] == 'n') { - reg = instr->VFPNRegValue(precision); + int reg = instr->VnValue(); + if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue())); + if (format[0] == 'D') PrintDRegister(reg); + return 2; } else if (format[1] == 'm') { - reg = instr->VFPMRegValue(precision); + int reg = instr->VmValue(); + if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue())); + if (format[0] == 'D') PrintDRegister(reg); + return 2; } else if (format[1] == 'd') { - reg = instr->VFPDRegValue(precision); - if (format[2] == '+') { - int immed8 = instr->Immed8Value(); - if (format[0] == 'S') reg += immed8 - 1; - if (format[0] == 'D') reg += (immed8 / 2 - 1); - } - if (format[2] == '+') retval = 3; - } else { - UNREACHABLE(); - } - - if (precision == kSinglePrecision) { - PrintSRegister(reg); - } else { - PrintDRegister(reg); + int reg = instr->VdValue(); + if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue())); + if (format[0] == 'D') PrintDRegister(reg); + return 2; } - return retval; + UNREACHABLE(); + return -1; } @@ -502,16 +490,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { ASSERT(STRING_STARTS_WITH(format, "memop")); if (instr->HasL()) { Print("ldr"); - } else { - if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) && - (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) { - if (instr->Bit(5) == 1) { - Print("strd"); - } else { - Print("ldrd"); - } - return 5; + } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) { + if (instr->Bits(7, 4) == 0xf) { + Print("strd"); + } else { + Print("ldrd"); } + } else { Print("str"); } return 5; @@ -914,7 +899,6 @@ void Decoder::DecodeType2(Instruction* instr) { case da_x: { if (instr->HasW()) { Unknown(instr); // not used in V8 - return; } Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12"); break; @@ -922,7 +906,6 @@ void Decoder::DecodeType2(Instruction* instr) { case ia_x: { if (instr->HasW()) { Unknown(instr); // not used in V8 - return; } Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12"); break; @@ -1009,15 +992,11 @@ void Decoder::DecodeType3(Instruction* instr) { void Decoder::DecodeType4(Instruction* instr) { - if (instr->Bit(22) != 0) { - // Privileged mode currently not supported. - Unknown(instr); + ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported. 
+ if (instr->HasL()) { + Format(instr, "ldm'cond'pu 'rn'w, 'rlist"); } else { - if (instr->HasL()) { - Format(instr, "ldm'cond'pu 'rn'w, 'rlist"); - } else { - Format(instr, "stm'cond'pu 'rn'w, 'rlist"); - } + Format(instr, "stm'cond'pu 'rn'w, 'rlist"); } } @@ -1063,8 +1042,6 @@ int Decoder::DecodeType7(Instruction* instr) { // vmov: Rt = Sn // vcvt: Dd = Sm // vcvt: Sd = Dm -// Dd = vabs(Dm) -// Dd = vneg(Dm) // Dd = vadd(Dn, Dm) // Dd = vsub(Dn, Dm) // Dd = vmul(Dn, Dm) @@ -1089,10 +1066,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { } } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) { // vabs - Format(instr, "vabs.f64'cond 'Dd, 'Dm"); - } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) { - // vneg - Format(instr, "vneg.f64'cond 'Dd, 'Dm"); + Format(instr, "vabs'cond 'Dd, 'Dm"); } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) { @@ -1285,22 +1259,9 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]"); } break; - case 0x4: - case 0x5: - case 0x6: - case 0x7: - case 0x9: - case 0xB: { - bool to_vfp_register = (instr->VLValue() == 0x1); - if (to_vfp_register) { - Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}"); - } else { - Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}"); - } - break; - } default: Unknown(instr); // Not used by V8. + break; } } else if (instr->CoprocessorValue() == 0xB) { switch (instr->OpcodeValue()) { @@ -1328,38 +1289,12 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]"); } break; - case 0x4: - case 0x5: - case 0x9: { - bool to_vfp_register = (instr->VLValue() == 0x1); - if (to_vfp_register) { - Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}"); - } else { - Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}"); - } - break; - } default: Unknown(instr); // Not used by V8. + break; } } else { - Unknown(instr); // Not used by V8. - } -} - - -bool Decoder::IsConstantPoolAt(byte* instr_ptr) { - int instruction_bits = *(reinterpret_cast<int*>(instr_ptr)); - return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker; -} - - -int Decoder::ConstantPoolSizeAt(byte* instr_ptr) { - if (IsConstantPoolAt(instr_ptr)) { - int instruction_bits = *(reinterpret_cast<int*>(instr_ptr)); - return instruction_bits & kConstantPoolLengthMask; - } else { - return -1; + UNIMPLEMENTED(); // Not used by V8. 
} } @@ -1372,15 +1307,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) { "%08x ", instr->InstructionBits()); if (instr->ConditionField() == kSpecialCondition) { - Unknown(instr); - return Instruction::kInstrSize; - } - int instruction_bits = *(reinterpret_cast<int*>(instr_ptr)); - if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) { - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "constant pool begin (length %d)", - instruction_bits & - kConstantPoolLengthMask); + UNIMPLEMENTED(); return Instruction::kInstrSize; } switch (instr->TypeValue()) { @@ -1432,8 +1359,9 @@ namespace disasm { const char* NameConverter::NameOfAddress(byte* addr) const { - v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr); - return tmp_buffer_.start(); + static v8::internal::EmbeddedVector<char, 32> tmp_buffer; + v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr); + return tmp_buffer.start(); } @@ -1483,7 +1411,12 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer, int Disassembler::ConstantPoolSizeAt(byte* instruction) { - return v8::internal::Decoder::ConstantPoolSizeAt(instruction); + int instruction_bits = *(reinterpret_cast<int*>(instruction)); + if ((instruction_bits & 0xfff00000) == 0x03000000) { + return instruction_bits & 0x0000ffff; + } else { + return -1; + } } diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 84e108b3dc..4aa8d6aa9a 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -72,9 +72,6 @@ static const RegList kCalleeSaved = static const int kNumCalleeSaved = 7 + kR9Available; -// Double registers d8 to d15 are callee-saved. -static const int kNumDoubleCalleeSaved = 8; - // Number of registers for which space is reserved in safepoints. Must be a // multiple of 8. @@ -139,7 +136,7 @@ class JavaScriptFrameConstants : public AllStatic { public: // FP-relative. static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset; - static const int kLastParameterOffset = +2 * kPointerSize; + static const int kSavedRegistersOffset = +2 * kPointerSize; static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; // Caller SP-relative. diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 4b55915e91..7a47644781 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -30,7 +30,7 @@ #if defined(V8_TARGET_ARCH_ARM) #include "code-stubs.h" -#include "codegen.h" +#include "codegen-inl.h" #include "compiler.h" #include "debug.h" #include "full-codegen.h" @@ -46,12 +46,6 @@ namespace internal { #define __ ACCESS_MASM(masm_) -static unsigned GetPropertyId(Property* property) { - if (property->is_synthetic()) return AstNode::kNoNumber; - return property->id(); -} - - // A patch site is a location in the code which it is possible to patch. This // class has a number of methods to emit the code which is patchable and the // method EmitPatchInfo to record a marker back to the patchable code. 
This @@ -92,19 +86,17 @@ class JumpPatchSite BASE_EMBEDDED { } void EmitPatchInfo() { - if (patch_site_.is_bound()) { - int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); - Register reg; - reg.set_code(delta_to_patch_site / kOff12Mask); - __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask); + int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); + Register reg; + reg.set_code(delta_to_patch_site / kOff12Mask); + __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask); #ifdef DEBUG - info_emitted_ = true; + info_emitted_ = true; #endif - } else { - __ nop(); // Signals no inlined code. - } } + bool is_bound() const { return patch_site_.is_bound(); } + private: MacroAssembler* masm_; Label patch_site_; @@ -131,7 +123,6 @@ class JumpPatchSite BASE_EMBEDDED { void FullCodeGenerator::Generate(CompilationInfo* info) { ASSERT(info_ == NULL); info_ = info; - scope_ = info->scope(); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -142,21 +133,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { } #endif - // Strict mode functions and builtins need to replace the receiver - // with undefined when called as functions (without an explicit - // receiver object). r5 is zero for method calls and non-zero for - // function calls. - if (info->is_strict_mode() || info->is_native()) { - Label ok; - __ cmp(r5, Operand(0)); - __ b(eq, &ok); - int receiver_offset = info->scope()->num_parameters() * kPointerSize; - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ str(r2, MemOperand(sp, receiver_offset)); - __ bind(&ok); - } - - int locals_count = info->scope()->num_stack_slots(); + int locals_count = scope()->num_stack_slots(); __ Push(lr, fp, cp, r1); if (locals_count > 0) { @@ -176,7 +153,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { bool function_in_register = true; // Possibly allocate a local context. - int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate local context"); // Argument to NewContext is the function, which is in r1. @@ -185,14 +162,14 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { FastNewContextStub stub(heap_slots); __ CallStub(&stub); } else { - __ CallRuntime(Runtime::kNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewContext, 1); } function_in_register = false; // Context is returned in both r0 and cp. It replaces the context // passed to us. It's saved in the stack and kept live in cp. __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); // Copy any necessary parameters into the context. - int num_parameters = info->scope()->num_parameters(); + int num_parameters = scope()->num_parameters(); for (int i = 0; i < num_parameters; i++) { Slot* slot = scope()->parameter(i)->AsSlot(); if (slot != NULL && slot->type() == Slot::CONTEXT) { @@ -223,29 +200,23 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ mov(r3, r1); } // Receiver is just before the parameters on the caller's stack. 
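The comment just above ("Receiver is just before the parameters on the caller's stack") is made concrete by the address arithmetic in the next hunk: the receiver slot sits one word above the last parameter, so its address is fp plus the fixed caller-SP offset plus one word per parameter. A small sketch of that computation, with the frame layout assumed for 32-bit ARM (constant values are an assumption, not taken from this diff):

    #include <cstdint>

    // Assumed standard frame layout: [fp] holds the caller's fp and [fp + 4]
    // the return address, so the caller's sp is fp + 8.
    static const int kPointerSize = 4;
    static const int kCallerSPOffset = 2 * kPointerSize;

    // Address of the receiver slot, which lives just above the parameters on
    // the caller's side of the frame; this mirrors the
    // __ add(r2, fp, Operand(kCallerSPOffset + offset)) emitted when
    // materializing the arguments object.
    static inline uintptr_t ReceiverAddress(uintptr_t fp, int num_parameters) {
      return fp + kCallerSPOffset + num_parameters * kPointerSize;
    }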
- int num_parameters = info->scope()->num_parameters(); - int offset = num_parameters * kPointerSize; + int offset = scope()->num_parameters() * kPointerSize; __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset)); - __ mov(r1, Operand(Smi::FromInt(num_parameters))); + __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters()))); __ Push(r3, r2, r1); // Arguments to ArgumentsAccessStub: // function, receiver address, parameter count. // The stub will rewrite receiever and parameter count if the previous // stack frame was an arguments adapter frame. - ArgumentsAccessStub::Type type; - if (is_strict_mode()) { - type = ArgumentsAccessStub::NEW_STRICT; - } else if (function()->has_duplicate_parameters()) { - type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW; - } else { - type = ArgumentsAccessStub::NEW_NON_STRICT_FAST; - } - ArgumentsAccessStub stub(type); + ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); __ CallStub(&stub); - + // Duplicate the value; move-to-slot operation might clobber registers. + __ mov(r3, r0); Move(arguments->AsSlot(), r0, r1, r2); + Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot(); + Move(dot_arguments_slot, r3, r1, r2); } if (FLAG_trace) { @@ -269,7 +240,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { } { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS); + PrepareForBailout(info->function(), NO_REGISTERS); Label ok; __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); @@ -349,7 +320,7 @@ void FullCodeGenerator::EmitReturnSequence() { { Assembler::BlockConstPoolScope block_const_pool(masm_); // Here we use masm_-> instead of the __ macro to avoid the code coverage // tool from instrumenting as we rely on the code size here. - int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize; + int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize; CodeGenerator::RecordPositions(masm_, function()->end_position() - 1); __ RecordJSReturn(); masm_->mov(sp, fp); @@ -387,7 +358,7 @@ void FullCodeGenerator::TestContext::Plug(Slot* slot) const { // For simplicity we always test the accumulator register. codegen()->Move(result_register(), slot); codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL); - codegen()->DoTest(this); + codegen()->DoTest(true_label_, false_label_, fall_through_); } @@ -421,7 +392,7 @@ void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const { if (true_label_ != fall_through_) __ b(true_label_); } else { __ LoadRoot(result_register(), index); - codegen()->DoTest(this); + codegen()->DoTest(true_label_, false_label_, fall_through_); } } @@ -455,7 +426,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const { if (true_label_ != fall_through_) __ b(true_label_); } else if (lit->IsString()) { if (String::cast(*lit)->length() == 0) { - if (false_label_ != fall_through_) __ b(false_label_); + if (false_label_ != fall_through_) __ b(false_label_); + __ b(false_label_); } else { if (true_label_ != fall_through_) __ b(true_label_); } @@ -468,7 +440,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const { } else { // For simplicity we always test the accumulator register. 
__ mov(result_register(), Operand(lit)); - codegen()->DoTest(this); + codegen()->DoTest(true_label_, false_label_, fall_through_); } } @@ -504,7 +476,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, __ Drop(count); __ Move(result_register(), reg); codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL); - codegen()->DoTest(this); + codegen()->DoTest(true_label_, false_label_, fall_through_); } @@ -582,11 +554,27 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const { } -void FullCodeGenerator::DoTest(Expression* condition, - Label* if_true, +void FullCodeGenerator::DoTest(Label* if_true, Label* if_false, Label* fall_through) { if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Emit the inlined tests assumed by the stub. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_false); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_true); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(result_register(), ip); + __ b(eq, if_false); + STATIC_ASSERT(kSmiTag == 0); + __ tst(result_register(), result_register()); + __ b(eq, if_false); + __ JumpIfSmi(result_register(), if_true); + + // Call the ToBoolean stub for all other cases. ToBooleanStub stub(result_register()); __ CallStub(&stub); __ tst(result_register(), result_register()); @@ -598,6 +586,8 @@ void FullCodeGenerator::DoTest(Expression* condition, __ LoadRoot(ip, Heap::kFalseValueRootIndex); __ cmp(r0, ip); } + + // The stub returns nonzero for true. Split(ne, if_true, if_false, fall_through); } @@ -717,12 +707,10 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { - // Check that we're not inside a with or catch context. - __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); - __ CompareRoot(r1, Heap::kWithContextMapRootIndex); - __ Check(ne, "Declaration in with context."); - __ CompareRoot(r1, Heap::kCatchContextMapRootIndex); - __ Check(ne, "Declaration in catch context."); + // Check that we're not inside a 'with'. + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ cmp(r1, cp); + __ Check(eq, "Unexpected declaration in current context."); } if (mode == Variable::CONST) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -767,30 +755,31 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, } } else if (prop != NULL) { - // A const declaration aliasing a parameter is an illegal redeclaration. - ASSERT(mode != Variable::CONST); - if (function != NULL) { - // We are declaring a function that rewrites to a property. - // Use (keyed) IC to set the initial value. We cannot visit the - // rewrite because it's shared and we risk recording duplicate AST - // IDs for bailouts from optimized code. + if (function != NULL || mode == Variable::CONST) { + // We are declaring a function or constant that rewrites to a + // property. Use (keyed) IC to set the initial value. We + // cannot visit the rewrite because it's shared and we risk + // recording duplicate AST IDs for bailouts from optimized code. 
ASSERT(prop->obj()->AsVariableProxy() != NULL); { AccumulatorValueContext for_object(this); EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); } - - __ push(r0); - VisitForAccumulatorValue(function); - __ pop(r2); - + if (function != NULL) { + __ push(r0); + VisitForAccumulatorValue(function); + __ pop(r2); + } else { + __ mov(r2, r0); + __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); + } ASSERT(prop->key()->AsLiteral() != NULL && prop->key()->AsLiteral()->handle()->IsSmi()); __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() - : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ Call(ic); + Handle<Code> ic(Builtins::builtin(is_strict() + ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); // Value in r0 is ignored (declarations are statements). } } @@ -830,7 +819,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Compile all the tests with branches to their bodies. for (int i = 0; i < clauses->length(); i++) { CaseClause* clause = clauses->at(i); - clause->body_target()->Unuse(); + clause->body_target()->entry_label()->Unuse(); // The default is not a test, but remember it as final fall through. if (clause->is_default()) { @@ -857,20 +846,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { __ cmp(r1, r0); __ b(ne, &next_test); __ Drop(1); // Switch value is no longer needed. - __ b(clause->body_target()); + __ b(clause->body_target()->entry_label()); __ bind(&slow_case); } // Record position before stub call for type feedback. SetSourcePosition(clause->position()); Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); - __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); - patch_site.EmitPatchInfo(); - + EmitCallIC(ic, &patch_site); __ cmp(r0, Operand(0)); __ b(ne, &next_test); __ Drop(1); // Switch value is no longer needed. - __ b(clause->body_target()); + __ b(clause->body_target()->entry_label()); } // Discard the test value and jump to the default if present, otherwise to @@ -880,15 +867,14 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { if (default_clause == NULL) { __ b(nested_statement.break_target()); } else { - __ b(default_clause->body_target()); + __ b(default_clause->body_target()->entry_label()); } // Compile all the case bodies. for (int i = 0; i < clauses->length(); i++) { Comment cmnt(masm_, "[ Case body"); CaseClause* clause = clauses->at(i); - __ bind(clause->body_target()); - PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS); + __ bind(clause->body_target()->entry_label()); VisitStatements(clause->statements()); } @@ -920,11 +906,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // Convert the object to a JS object. Label convert, done_convert; __ JumpIfSmi(r0, &convert); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &done_convert); + __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); + __ b(hs, &done_convert); __ bind(&convert); __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); __ bind(&done_convert); __ push(r0); @@ -952,8 +938,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // check for an enum cache. Leave the map in r2 for the subsequent // prototype load. 
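The for-in code above takes a fast path only when the receiver's map carries a cached list of enumerable property names (the "enum cache" in the instance descriptors) and the check can be repeated along the prototype chain; any miss falls back to the runtime. A conceptual sketch of that decision with hypothetical types, only to restate the control flow, not V8's actual data structures:

    // Hypothetical flattened view of a map and its enum cache.
    struct MapSketch {
      const char** enum_cache;   // cached enumerable own property names, or null
      MapSketch* prototype_map;  // null at the end of the chain
    };

    // True if the generated code may iterate the cached keys directly instead
    // of calling into the runtime to build the list of names.
    static bool CanUseEnumCache(const MapSketch* map) {
      for (; map != nullptr; map = map->prototype_map) {
        if (map->enum_cache == nullptr) return false;  // slow path
      }
      return true;
    }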
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset)); - __ JumpIfSmi(r3, &call_runtime); + __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset)); + __ cmp(r3, empty_descriptor_array_value); + __ b(eq, &call_runtime); // Check that there is an enum cache in the non-empty instance // descriptors (r3). This is the case if the next enumeration @@ -998,7 +985,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // We got a map in register r0. Get the enumeration cache from it. __ bind(&use_cache); - __ LoadInstanceDescriptors(r0, r1); + __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset)); __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); @@ -1047,7 +1034,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // just skip it. __ push(r1); // Enumerable. __ push(r3); // Current entry. - __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS); __ mov(r3, Operand(r0), SetCC); __ b(eq, loop_statement.continue_target()); @@ -1093,10 +1080,10 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, // doesn't just get a copy of the existing unoptimized code. if (!FLAG_always_opt && !FLAG_prepare_always_opt && - !pretenure && scope()->is_function_scope() && - info->num_literals() == 0) { - FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode); + info->num_literals() == 0 && + !pretenure) { + FastNewClosureStub stub; __ mov(r0, Operand(info)); __ push(r0); __ CallStub(&stub); @@ -1117,65 +1104,6 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { } -void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions( - Slot* slot, - TypeofState typeof_state, - Label* slow) { - Register current = cp; - Register next = r1; - Register temp = r2; - - Scope* s = scope(); - while (s != NULL) { - if (s->num_heap_slots() > 0) { - if (s->calls_eval()) { - // Check that extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); - __ tst(temp, temp); - __ b(ne, slow); - } - // Load next context in chain. - __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX)); - // Walk the rest of the chain without clobbering cp. - current = next; - } - // If no outer scope calls eval, we do not need to check more - // context extensions. - if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; - s = s->outer_scope(); - } - - if (s->is_eval_scope()) { - Label loop, fast; - if (!current.is(next)) { - __ Move(next, current); - } - __ bind(&loop); - // Terminate at global context. - __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex); - __ cmp(temp, ip); - __ b(eq, &fast); - // Check that extension is NULL. - __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX)); - __ tst(temp, temp); - __ b(ne, slow); - // Load next context in chain. - __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX)); - __ b(&loop); - __ bind(&fast); - } - - __ ldr(r0, GlobalObjectOperand()); - __ mov(r2, Operand(slot->var()->name())); - RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) - ? 
RelocInfo::CODE_TARGET - : RelocInfo::CODE_TARGET_CONTEXT; - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ Call(ic, mode); -} - - MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( Slot* slot, Label* slow) { @@ -1192,7 +1120,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( __ tst(temp, temp); __ b(ne, slow); } - __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX)); + __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX)); + __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); // Walk the rest of the chain without clobbering cp. context = next; } @@ -1251,9 +1180,8 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(), slow)); __ mov(r0, Operand(key_literal->handle())); - Handle<Code> ic = - isolate()->builtins()->KeyedLoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property)); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); __ jmp(done); } } @@ -1262,23 +1190,85 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( } +void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions( + Slot* slot, + TypeofState typeof_state, + Label* slow) { + Register current = cp; + Register next = r1; + Register temp = r2; + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ tst(temp, temp); + __ b(ne, slow); + } + // Load next context in chain. + __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); + // Walk the rest of the chain without clobbering cp. + current = next; + } + // If no outer scope calls eval, we do not need to check more + // context extensions. + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s->is_eval_scope()) { + Label loop, fast; + if (!current.is(next)) { + __ Move(next, current); + } + __ bind(&loop); + // Terminate at global context. + __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex); + __ cmp(temp, ip); + __ b(eq, &fast); + // Check that extension is NULL. + __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX)); + __ tst(temp, temp); + __ b(ne, slow); + // Load next context in chain. + __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX)); + __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); + __ b(&loop); + __ bind(&fast); + } + + __ ldr(r0, GlobalObjectOperand()); + __ mov(r2, Operand(slot->var()->name())); + RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT; + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + EmitCallIC(ic, mode); +} + + void FullCodeGenerator::EmitVariableLoad(Variable* var) { - // Three cases: non-this global variables, lookup slots, and all other - // types of slots. + // Four cases: non-this global variables, lookup slots, all other + // types of slots, and parameters that rewrite to explicit property + // accesses on the arguments object. 
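The EmitLoadGlobalSlotCheckExtensions code above is, at its core, a walk over the runtime context chain: any intervening context that carries an extension object (introduced by 'with' or eval) forces the slow path, and the walk terminates once it reaches the global context. A conceptual sketch with hypothetical types, only to spell out the loop the generated code performs:

    // Hypothetical types, not V8's; this just restates the emitted loop.
    struct ContextSketch {
      ContextSketch* outer;   // next context in the chain
      void* extension;        // non-null for 'with'/eval extension objects
      bool is_global;
    };

    // True if the global variable load may take the fast path (a load IC
    // against the global object); false means fall back to a runtime lookup.
    static bool CanSkipToGlobal(ContextSketch* ctx) {
      for (; ctx != nullptr && !ctx->is_global; ctx = ctx->outer) {
        if (ctx->extension != nullptr) return false;
      }
      return true;
    }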
Slot* slot = var->AsSlot(); - ASSERT((var->is_global() && !var->is_this()) == (slot == NULL)); + Property* property = var->AsProperty(); - if (slot == NULL) { + if (var->is_global() && !var->is_this()) { Comment cmnt(masm_, "Global variable"); // Use inline caching. Variable name is passed in r2 and the global // object (receiver) in r0. __ ldr(r0, GlobalObjectOperand()); __ mov(r2, Operand(var->name())); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); context()->Plug(r0); - } else if (slot->type() == Slot::LOOKUP) { + } else if (slot != NULL && slot->type() == Slot::LOOKUP) { Label done, slow; // Generate code for loading from variables potentially shadowed @@ -1294,7 +1284,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) { context()->Plug(r0); - } else { + } else if (slot != NULL) { Comment cmnt(masm_, (slot->type() == Slot::CONTEXT) ? "Context slot" : "Stack slot"); @@ -1310,6 +1300,32 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) { } else { context()->Plug(slot); } + } else { + Comment cmnt(masm_, "Rewritten parameter"); + ASSERT_NOT_NULL(property); + // Rewritten parameter accesses are of the form "slot[literal]". + + // Assert that the object is in a slot. + Variable* object_var = property->obj()->AsVariableProxy()->AsVariable(); + ASSERT_NOT_NULL(object_var); + Slot* object_slot = object_var->AsSlot(); + ASSERT_NOT_NULL(object_slot); + + // Load the object. + Move(r1, object_slot); + + // Assert that the key is a smi. + Literal* key_literal = property->key()->AsLiteral(); + ASSERT_NOT_NULL(key_literal); + ASSERT(key_literal->handle()->IsSmi()); + + // Load the key. + __ mov(r0, Operand(key_literal->handle())); + + // Call keyed load IC. It has arguments key and receiver in r0 and r1. + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); + context()->Plug(r0); } } @@ -1371,13 +1387,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r1, Operand(expr->constant_properties())); - int flags = expr->fast_elements() - ? ObjectLiteral::kFastElements - : ObjectLiteral::kNoFlags; - flags |= expr->has_function() - ? ObjectLiteral::kHasFunction - : ObjectLiteral::kNoFlags; - __ mov(r0, Operand(Smi::FromInt(flags))); + __ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0))); __ Push(r3, r2, r1, r0); if (expr->depth() > 1) { __ CallRuntime(Runtime::kCreateObjectLiteral, 4); @@ -1416,10 +1426,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { VisitForAccumulatorValue(value); __ mov(r2, Operand(key->handle())); __ ldr(r1, MemOperand(sp)); - Handle<Code> ic = is_strict_mode() - ? 
isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, key->id()); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { VisitForEffect(value); @@ -1457,13 +1465,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { } } - if (expr->has_function()) { - ASSERT(result_saved); - __ ldr(r0, MemOperand(sp)); - __ push(r0); - __ CallRuntime(Runtime::kToFastProperties, 1); - } - if (result_saved) { context()->PlugTOS(); } else { @@ -1483,13 +1484,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r1, Operand(expr->constant_elements())); __ Push(r3, r2, r1); - if (expr->constant_elements()->map() == - isolate()->heap()->fixed_cow_array_map()) { + if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) { FastCloneShallowArrayStub stub( FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); __ CallStub(&stub); - __ IncrementCounter( - isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2); } else if (expr->depth() > 1) { __ CallRuntime(Runtime::kCreateArrayLiteral, 3); } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { @@ -1550,7 +1549,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } // Left-hand side can only be a property, a global or a (parameter or local) - // slot. + // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; LhsKind assign_type = VARIABLE; Property* property = expr->target()->AsProperty(); @@ -1576,37 +1575,52 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { break; case KEYED_PROPERTY: if (expr->is_compound()) { - VisitForStackValue(property->obj()); - VisitForAccumulatorValue(property->key()); + if (property->is_arguments_access()) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0)); + __ push(r0); + __ mov(r0, Operand(property->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(property->obj()); + VisitForAccumulatorValue(property->key()); + } __ ldr(r1, MemOperand(sp, 0)); __ push(r0); } else { - VisitForStackValue(property->obj()); - VisitForStackValue(property->key()); + if (property->is_arguments_access()) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0)); + __ mov(r0, Operand(property->key()->AsLiteral()->handle())); + __ Push(r1, r0); + } else { + VisitForStackValue(property->obj()); + VisitForStackValue(property->key()); + } } break; } - // For compound assignments we need another deoptimization point after the - // variable/property load. 
if (expr->is_compound()) { { AccumulatorValueContext context(this); switch (assign_type) { case VARIABLE: EmitVariableLoad(expr->target()->AsVariableProxy()->var()); - PrepareForBailout(expr->target(), TOS_REG); break; case NAMED_PROPERTY: EmitNamedPropertyLoad(property); - PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG); break; case KEYED_PROPERTY: EmitKeyedPropertyLoad(property); - PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG); break; } } + // For property compound assignments we need another deoptimization + // point after the property load. + if (property != NULL) { + PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG); + } + Token::Value op = expr->binary_op(); __ push(r0); // Left operand goes on the stack. VisitForAccumulatorValue(expr->value()); @@ -1617,13 +1631,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { SetSourcePosition(expr->position() + 1); AccumulatorValueContext context(this); if (ShouldInlineSmiCase(op)) { - EmitInlineSmiBinaryOp(expr->binary_operation(), + EmitInlineSmiBinaryOp(expr, op, mode, expr->target(), expr->value()); } else { - EmitBinaryOp(expr->binary_operation(), op, mode); + EmitBinaryOp(op, mode); } // Deoptimization point in case the binary operation may have side effects. @@ -1658,20 +1672,20 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { Literal* key = prop->key()->AsLiteral(); __ mov(r2, Operand(key->handle())); // Call load IC. It has arguments receiver and property name r0 and r2. - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); } void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); // Call keyed load IC. It has arguments key and receiver in r0 and r1. - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); } -void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, +void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, Token::Value op, OverwriteMode mode, Expression* left_expr, @@ -1693,15 +1707,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, patch_site.EmitJumpIfSmi(scratch1, &smi_case); __ bind(&stub_call); - BinaryOpStub stub(op, mode); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); - patch_site.EmitPatchInfo(); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), &patch_site); __ jmp(&done); __ bind(&smi_case); // Smi case. This code works the same way as the smi-smi case in the type // recording binary operation stub, see - // BinaryOpStub::GenerateSmiSmiOperation for comments. + // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments. switch (op) { case Token::SAR: __ b(&stub_call); @@ -1771,14 +1784,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, } -void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, - Token::Value op, +void FullCodeGenerator::EmitBinaryOp(Token::Value op, OverwriteMode mode) { __ pop(r1); - BinaryOpStub stub(op, mode); - JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. 
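The inline smi fast path above relies on V8's pointer tagging on 32-bit targets: a smi is a 31-bit integer stored shifted left by one with a zero tag bit, so two operands are both smis exactly when the OR of their bits still has a clear tag bit, and addition can be performed directly on the tagged values. A small standalone illustration (not V8 code):

    #include <cstdint>

    // 32-bit smi tagging: value v is stored as (v << 1), tag bit 0 == 0.
    static const int32_t kSmiTagMask = 1;

    static inline bool BothSmis(int32_t a, int32_t b) {
      // The OR-and-test trick the patch site's smi check relies on.
      return ((a | b) & kSmiTagMask) == 0;
    }

    static inline int32_t SmiAdd(int32_t a, int32_t b) {
      // (2x) + (2y) == 2(x + y); the generated code must additionally detect
      // overflow and fall back to the binary-op stub.
      return a + b;
    }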
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); - patch_site.EmitPatchInfo(); + TypeRecordingBinaryOpStub stub(op, mode); + EmitCallIC(stub.GetCode(), NULL); context()->Plug(r0); } @@ -1792,7 +1802,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { } // Left-hand side can only be a property, a global or a (parameter or local) - // slot. + // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; LhsKind assign_type = VARIABLE; Property* prop = expr->AsProperty(); @@ -1815,23 +1825,33 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { __ mov(r1, r0); __ pop(r0); // Restore value. __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); - __ Call(ic); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } case KEYED_PROPERTY: { __ push(r0); // Preserve value. - VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); - __ mov(r1, r0); - __ pop(r2); + if (prop->is_synthetic()) { + ASSERT(prop->obj()->AsVariableProxy() != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + { AccumulatorValueContext for_object(this); + EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + } + __ mov(r2, r0); + __ mov(r1, Operand(prop->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + __ mov(r1, r0); + __ pop(r2); + } __ pop(r0); // Restore value. - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() - : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ Call(ic); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); break; } } @@ -1842,6 +1862,8 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { + // Left-hand sides that rewrite to explicit property accesses do not reach + // here. ASSERT(var != NULL); ASSERT(var->is_global() || var->AsSlot() != NULL); @@ -1852,10 +1874,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // r2, and the global object in r1. __ mov(r2, Operand(var->name())); __ ldr(r1, GlobalObjectOperand()); - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); + Handle<Code> ic(Builtins::builtin( + is_strict() ? 
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); } else if (op == Token::INIT_CONST) { // Like var declarations, const declarations are hoisted to function @@ -1877,7 +1899,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ b(ne, &skip); __ str(result_register(), MemOperand(fp, SlotOffset(slot))); break; - case Slot::CONTEXT: + case Slot::CONTEXT: { + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ ldr(r2, ContextOperand(r1, slot->index())); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r2, ip); + __ b(ne, &skip); + __ str(r0, ContextOperand(r1, slot->index())); + int offset = Context::SlotOffset(slot->index()); + __ mov(r3, r0); // Preserve the stored value in r0. + __ RecordWrite(r1, Operand(offset), r3, r2); + break; + } case Slot::LOOKUP: __ push(r0); __ mov(r0, Operand(slot->var()->name())); @@ -1950,10 +1983,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { __ pop(r1); } - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { @@ -1996,10 +2029,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { __ pop(r2); } - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() - : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { @@ -2049,9 +2082,8 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, SetSourcePosition(expr->position()); // Call the IC initialization code. InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; - Handle<Code> ic = - isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode); - __ Call(ic, mode, expr->id()); + Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop); + EmitCallIC(ic, mode); RecordJSReturnSite(expr); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2060,7 +2092,8 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, - Expression* key) { + Expression* key, + RelocInfo::Mode mode) { // Load the key. VisitForAccumulatorValue(key); @@ -2082,10 +2115,9 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, SetSourcePosition(expr->position()); // Call the IC initialization code. InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; - Handle<Code> ic = - isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop); + Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop); __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key. - __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); + EmitCallIC(ic, mode); RecordJSReturnSite(expr); // Restore context register. 
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2093,7 +2125,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, } -void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { +void FullCodeGenerator::EmitCallWithStub(Call* expr) { // Code common for calls using the call stub. ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); @@ -2105,7 +2137,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { // Record source position for debugger. SetSourcePosition(expr->position()); InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; - CallFunctionStub stub(arg_count, in_loop, flags); + CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); RecordJSReturnSite(expr); // Restore context register. @@ -2125,8 +2157,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, __ push(r1); // Push the receiver of the enclosing function and do runtime call. - int receiver_offset = 2 + info_->scope()->num_parameters(); - __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize)); + __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize)); __ push(r1); // Push the strict mode flag. __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); @@ -2202,7 +2233,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Record source position for debugger. SetSourcePosition(expr->position()); InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; - CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); RecordJSReturnSite(expr); // Restore context register. @@ -2245,17 +2276,14 @@ void FullCodeGenerator::VisitCall(Call* expr) { __ bind(&done); // Push function. __ push(r0); - // The receiver is implicitly the global receiver. Indicate this - // by passing the hole to the call function stub. - __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); + // Push global receiver. + __ ldr(r1, GlobalObjectOperand()); + __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ push(r1); __ bind(&call); } - // The receiver is either the global receiver or an object found - // by LoadContextSlot. That object could be the hole if the - // receiver is implicitly the global object. - EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT); + EmitCallWithStub(expr); } else if (fun->AsProperty() != NULL) { // Call to an object property. Property* prop = fun->AsProperty(); @@ -2269,7 +2297,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { } else { // Call to a keyed property. // For a synthetic property use keyed load IC followed by function call, - // for a regular property use EmitKeyedCallWithIC. + // for a regular property use keyed CallIC. if (prop->is_synthetic()) { // Do not visit the object and key subexpressions (they are shared // by all occurrences of the same rewritten parameter). @@ -2286,20 +2314,30 @@ void FullCodeGenerator::VisitCall(Call* expr) { // Record source code position for IC call. SetSourcePosition(prop->position()); - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); __ ldr(r1, GlobalObjectOperand()); __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ Push(r0, r1); // Function, receiver. 
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS); + EmitCallWithStub(expr); } else { { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(prop->obj()); } - EmitKeyedCallWithIC(expr, prop->key()); + EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET); } } } else { + // Call to some other expression. If the expression is an anonymous + // function literal not called in a loop, mark it as one that should + // also use the fast code generator. + FunctionLiteral* lit = fun->AsFunctionLiteral(); + if (lit != NULL && + lit->name()->Equals(Heap::empty_string()) && + loop_depth() == 0) { + lit->set_try_full_codegen(true); + } + { PreservePositionScope scope(masm()->positions_recorder()); VisitForStackValue(fun); } @@ -2308,7 +2346,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ push(r1); // Emit function call. - EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS); + EmitCallWithStub(expr); } #ifdef DEBUG @@ -2344,8 +2382,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { __ mov(r0, Operand(arg_count)); __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); - Handle<Code> construct_builtin = - isolate()->builtins()->JSConstructCall(); + Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall)); __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL); context()->Plug(r0); } @@ -2413,9 +2450,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) { __ tst(r1, Operand(1 << Map::kIsUndetectable)); __ b(ne, if_false); __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset)); - __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, if_false); - __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(le, if_true, if_false, fall_through); @@ -2436,7 +2473,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) { &if_true, &if_false, &fall_through); __ JumpIfSmi(r0, if_false); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(ge, if_true, if_false, fall_through); @@ -2481,74 +2518,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - if (FLAG_debug_code) __ AbortIfSmi(r0); - - __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset)); - __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); - __ b(ne, if_true); - - // Check for fast case object. Generate false result for slow case object. - __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset)); - __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHashTableMapRootIndex); - __ cmp(r2, ip); - __ b(eq, if_false); - - // Look for valueOf symbol in the descriptor array, and indicate false if - // found. The type is not checked, so if it is a transition it is a false - // negative. - __ LoadInstanceDescriptors(r1, r4); - __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // r4: descriptor array - // r3: length of descriptor array - // Calculate the end of the descriptor array. 
- STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kPointerSize == 4); - __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); - - // Calculate location of the first key name. - __ add(r4, - r4, - Operand(FixedArray::kHeaderSize - kHeapObjectTag + - DescriptorArray::kFirstIndex * kPointerSize)); - // Loop through all the keys in the descriptor array. If one of these is the - // symbol valueOf the result is false. - Label entry, loop; - // The use of ip to store the valueOf symbol asumes that it is not otherwise - // used in the loop below. - __ mov(ip, Operand(FACTORY->value_of_symbol())); - __ jmp(&entry); - __ bind(&loop); - __ ldr(r3, MemOperand(r4, 0)); - __ cmp(r3, ip); - __ b(eq, if_false); - __ add(r4, r4, Operand(kPointerSize)); - __ bind(&entry); - __ cmp(r4, Operand(r2)); - __ b(ne, &loop); - - // If a valueOf property is not found on the object check that it's - // prototype is the un-modified String prototype. If not result is false. - __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset)); - __ JumpIfSmi(r2, if_false); - __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX)); - __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); - __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); - __ cmp(r2, r3); - __ b(ne, if_false); - - // Set the bit in the map to indicate that it has been checked safe for - // default valueOf and set true result. - __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset)); - __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); - __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset)); - __ jmp(if_true); - + // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only + // used in a few functions in runtime.js which should not normally be hit by + // this compiler. PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); + __ jmp(if_false); context()->Plug(if_true, if_false); } @@ -2678,7 +2652,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) { // parameter count in r0. VisitForAccumulatorValue(args->at(0)); __ mov(r1, r0); - __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); + __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters()))); ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); __ CallStub(&stub); context()->Plug(r0); @@ -2690,7 +2664,7 @@ void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) { Label exit; // Get the number of formal parameters. - __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); + __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters()))); // Check if the calling frame is an arguments adaptor frame. __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); @@ -2718,18 +2692,16 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. - __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); - // Map is now in r0. + __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); // Map is now in r0. __ b(lt, &null); - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. 
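The block removed above was the inlined fast path for %_IsStringWrapperSafeForDefaultValueOf: it walks the map's descriptor array and reports "unsafe" as soon as one of the keys equals the valueOf symbol (the replacement simply answers false and leaves the work to the runtime). A minimal C++ sketch of that scan, with DescriptorArray reduced to a plain list of property names for illustration:

    #include <string>
    #include <vector>

    // Stand-in for a map's descriptor array: just the property names, in order.
    struct DescriptorArray {
      std::vector<std::string> keys;
    };

    // Mirrors the removed assembly loop: compare every key against "valueOf";
    // a hit means the object may shadow the default valueOf, so report "unsafe".
    // The real code additionally checks the prototype chain and a map bit.
    bool SafeForDefaultValueOf(const DescriptorArray& descriptors) {
      for (const std::string& key : descriptors.keys) {
        if (key == "valueOf") return false;   // __ b(eq, if_false) in the hunk
      }
      return true;
    }

    int main() {
      DescriptorArray d{{"toString", "valueOf"}};
      return SafeForDefaultValueOf(d) ? 1 : 0;   // expect "unsafe" here
    }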
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE)); - __ b(ge, &function); + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ cmp(r1, Operand(JS_FUNCTION_TYPE)); + __ b(eq, &function); // Check if the constructor in the map is a function. __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); @@ -2806,9 +2778,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { // by computing: // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). if (CpuFeatures::IsSupported(VFP3)) { - __ PrepareCallCFunction(1, r0); - __ mov(r0, Operand(ExternalReference::isolate_address())); - __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); + __ PrepareCallCFunction(0, r1); + __ CallCFunction(ExternalReference::random_uint32_function(), 0); CpuFeatures::Scope scope(VFP3); // 0x41300000 is the top half of 1.0 x 2^20 as a double. @@ -2826,11 +2797,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { __ vstr(d7, r0, HeapNumber::kValueOffset); __ mov(r0, r4); } else { - __ PrepareCallCFunction(2, r0); __ mov(r0, Operand(r4)); - __ mov(r1, Operand(ExternalReference::isolate_address())); + __ PrepareCallCFunction(1, r1); __ CallCFunction( - ExternalReference::fill_heap_number_with_random_function(isolate()), 2); + ExternalReference::fill_heap_number_with_random_function(), 1); } context()->Plug(r0); @@ -2885,8 +2855,7 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) { ASSERT(args->length() == 2); VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - MathPowStub stub; - __ CallStub(&stub); + __ CallRuntime(Runtime::kMath_pow, 2); context()->Plug(r0); } @@ -3069,8 +3038,7 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) { // Load the argument on the stack and call the stub. - TranscendentalCacheStub stub(TranscendentalCache::SIN, - TranscendentalCacheStub::TAGGED); + TranscendentalCacheStub stub(TranscendentalCache::SIN); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); __ CallStub(&stub); @@ -3080,8 +3048,7 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) { // Load the argument on the stack and call the stub. - TranscendentalCacheStub stub(TranscendentalCache::COS, - TranscendentalCacheStub::TAGGED); + TranscendentalCacheStub stub(TranscendentalCache::COS); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); __ CallStub(&stub); @@ -3091,8 +3058,7 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) { // Load the argument on the stack and call the stub. 
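The EmitRandomHeapNumber hunk above builds a random double as (1.(20 zeros)(32 random bits) x 2^20) - (1.0 x 2^20), which yields a uniform value in [0, 1) without any division. A small sketch of the same bit trick; rand32 / std::mt19937 is only a placeholder for whatever 32-bit source the runtime call supplies:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <random>

    // Build a double in [0, 1) from 32 random bits: the value encoded is
    // 2^20 + random/2^32, so subtracting 2^20 leaves random/2^32.
    double RandomUnitInterval(uint32_t random_bits) {
      // 0x41300000 is the top half of 1.0 x 2^20 as a double (same constant as
      // in the hunk above); the low 32 mantissa bits carry the random payload.
      uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 1048576.0;   // subtract 1.0 x 2^20
    }

    int main() {
      std::mt19937 rng(42);   // placeholder 32-bit random source
      std::printf("%f\n", RandomUnitInterval(rng()));
    }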
- TranscendentalCacheStub stub(TranscendentalCache::LOG, - TranscendentalCacheStub::TAGGED); + TranscendentalCacheStub stub(TranscendentalCache::LOG); ASSERT(args->length() == 1); VisitForStackValue(args->at(0)); __ CallStub(&stub); @@ -3112,17 +3078,17 @@ void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) { ASSERT(args->length() >= 2); - int arg_count = args->length() - 2; // 2 ~ receiver and function. - for (int i = 0; i < arg_count + 1; i++) { - VisitForStackValue(args->at(i)); + int arg_count = args->length() - 2; // For receiver and function. + VisitForStackValue(args->at(0)); // Receiver. + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i + 1)); } - VisitForAccumulatorValue(args->last()); // Function. + VisitForAccumulatorValue(args->at(arg_count + 1)); // Function. - // InvokeFunction requires the function in r1. Move it in there. - __ mov(r1, result_register()); + // InvokeFunction requires function in r1. Move it in there. + if (!result_register().is(r1)) __ mov(r1, result_register()); ParameterCount count(arg_count); - __ InvokeFunction(r1, count, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeFunction(r1, count, CALL_FUNCTION); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->Plug(r0); } @@ -3144,79 +3110,7 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); - Label done; - Label slow_case; - Register object = r0; - Register index1 = r1; - Register index2 = r2; - Register elements = r3; - Register scratch1 = r4; - Register scratch2 = r5; - - __ ldr(object, MemOperand(sp, 2 * kPointerSize)); - // Fetch the map and check if array is in fast case. - // Check that object doesn't require security checks and - // has no indexed interceptor. - __ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE); - __ b(ne, &slow_case); - // Map is now in scratch1. - - __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); - __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); - __ b(ne, &slow_case); - - // Check the object's elements are in fast case and writable. - __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset)); - __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(scratch1, ip); - __ b(ne, &slow_case); - - // Check that both indices are smis. - __ ldr(index1, MemOperand(sp, 1 * kPointerSize)); - __ ldr(index2, MemOperand(sp, 0)); - __ JumpIfNotBothSmi(index1, index2, &slow_case); - - // Check that both indices are valid. - __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset)); - __ cmp(scratch1, index1); - __ cmp(scratch1, index2, hi); - __ b(ls, &slow_case); - - // Bring the address of the elements into index1 and index2. - __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(index1, - scratch1, - Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ add(index2, - scratch1, - Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize)); - - // Swap elements. 
- __ ldr(scratch1, MemOperand(index1, 0)); - __ ldr(scratch2, MemOperand(index2, 0)); - __ str(scratch1, MemOperand(index2, 0)); - __ str(scratch2, MemOperand(index1, 0)); - - Label new_space; - __ InNewSpace(elements, scratch1, eq, &new_space); - // Possible optimization: do a check that both values are Smis - // (or them and test against Smi mask.) - - __ mov(scratch1, elements); - __ RecordWriteHelper(elements, index1, scratch2); - __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. - - __ bind(&new_space); - // We are done. Drop elements from the stack, and return undefined. - __ Drop(3); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - __ jmp(&done); - - __ bind(&slow_case); __ CallRuntime(Runtime::kSwapElements, 3); - - __ bind(&done); context()->Plug(r0); } @@ -3228,7 +3122,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) { int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); Handle<FixedArray> jsfunction_result_caches( - isolate()->global_context()->jsfunction_result_caches()); + Top::global_context()->jsfunction_result_caches()); if (jsfunction_result_caches->length() <= cache_id) { __ Abort("Attempt to use undefined cache."); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); @@ -3289,7 +3183,8 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) { __ b(eq, &ok); // Fail if either is a non-HeapObject. __ and_(tmp, left, Operand(right)); - __ JumpIfSmi(tmp, &fail); + __ tst(tmp, Operand(kSmiTagMask)); + __ b(eq, &fail); __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset)); __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset)); __ cmp(tmp2, Operand(JS_REGEXP_TYPE)); @@ -3379,7 +3274,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ b(ne, &bailout); // Check that the array has fast elements. - __ CheckFastElements(scratch1, scratch2, &bailout); + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ tst(scratch2, Operand(1 << Map::kHasFastElements)); + __ b(eq, &bailout); // If the array has length zero, return the empty string. __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); @@ -3577,39 +3474,6 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { } -void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) { - ASSERT(args->length() == 1); - - // Load the function into r0. - VisitForAccumulatorValue(args->at(0)); - - // Prepare for the test. - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - context()->PrepareTest(&materialize_true, &materialize_false, - &if_true, &if_false, &fall_through); - - // Test for strict mode function. - __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ b(ne, if_true); - - // Test for native function. - __ tst(r1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, if_true); - - // Not native or strict-mode function. 
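The removed EmitIsNativeOrStrictMode helper above tests two flag bits in the SharedFunctionInfo compiler-hints word, shifted by kSmiTagSize because the field is stored smi-tagged. A sketch of the same "packed flags in a tagged word" test; the bit positions below are assumed for illustration and are not V8's real values, and the two branch tests of the assembly are folded into one mask since only the boolean result matters:

    #include <cstdint>

    // Illustrative bit positions inside a compiler-hints field (assumed values).
    constexpr int kStrictModeFunctionBit = 0;
    constexpr int kNativeBit = 1;
    constexpr int kSmiTagSize = 1;   // hints are smi-tagged, so shift by one

    // The function counts as "native or strict mode" if either bit is set
    // in the tagged hints word, matching the two __ tst / __ b(ne, if_true)
    // pairs in the removed code.
    bool IsNativeOrStrictMode(uint32_t tagged_compiler_hints) {
      uint32_t mask = (1u << (kStrictModeFunctionBit + kSmiTagSize)) |
                      (1u << (kNativeBit + kSmiTagSize));
      return (tagged_compiler_hints & mask) != 0;
    }

    int main() {
      uint32_t hints = 1u << (kStrictModeFunctionBit + kSmiTagSize);
      return IsNativeOrStrictMode(hints) ? 0 : 1;
    }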
- __ b(if_false); - - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - context()->Plug(if_true, if_false); -} - - void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { Handle<String> name = expr->name(); if (name->length() > 0 && name->Get(0) == '_') { @@ -3637,12 +3501,8 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { if (expr->is_jsruntime()) { // Call the JS runtime function. __ mov(r2, Operand(expr->name())); - RelocInfo::Mode mode = RelocInfo::CODE_TARGET; - Handle<Code> ic = - isolate()->stub_cache()->ComputeCallInitialize(arg_count, - NOT_IN_LOOP, - mode); - __ Call(ic, mode, expr->id()); + Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, NOT_IN_LOOP); + EmitCallIC(ic, RelocInfo::CODE_TARGET); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { @@ -3670,7 +3530,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { VisitForStackValue(prop->key()); __ mov(r1, Operand(Smi::FromInt(strict_mode_flag()))); __ push(r1); - __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::DELETE, CALL_JS); context()->Plug(r0); } } else if (var != NULL) { @@ -3682,7 +3542,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { __ mov(r1, Operand(var->name())); __ mov(r0, Operand(Smi::FromInt(kNonStrictMode))); __ Push(r2, r1, r0); - __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::DELETE, CALL_JS); context()->Plug(r0); } else if (var->AsSlot() != NULL && var->AsSlot()->type() != Slot::LOOKUP) { @@ -3750,7 +3610,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Comment cmt(masm_, "[ UnaryOperation (ADD)"); VisitForAccumulatorValue(expr->expression()); Label no_conversion; - __ JumpIfSmi(result_register(), &no_conversion); + __ tst(result_register(), Operand(kSmiTagMask)); + __ b(eq, &no_conversion); ToNumberStub convert_stub; __ CallStub(&convert_stub); __ bind(&no_conversion); @@ -3758,13 +3619,48 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { break; } - case Token::SUB: - EmitUnaryOperation(expr, "[ UnaryOperation (SUB)"); + case Token::SUB: { + Comment cmt(masm_, "[ UnaryOperation (SUB)"); + bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); + UnaryOverwriteMode overwrite = + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; + GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS); + // GenericUnaryOpStub expects the argument to be in the + // accumulator register r0. + VisitForAccumulatorValue(expr->expression()); + __ CallStub(&stub); + context()->Plug(r0); break; + } - case Token::BIT_NOT: - EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)"); + case Token::BIT_NOT: { + Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)"); + // The generic unary operation stub expects the argument to be + // in the accumulator register r0. + VisitForAccumulatorValue(expr->expression()); + Label done; + bool inline_smi_code = ShouldInlineSmiCase(expr->op()); + if (inline_smi_code) { + Label call_stub; + __ JumpIfNotSmi(r0, &call_stub); + __ mvn(r0, Operand(r0)); + // Bit-clear inverted smi-tag. + __ bic(r0, r0, Operand(kSmiTagMask)); + __ b(&done); + __ bind(&call_stub); + } + bool overwrite = expr->expression()->ResultOverwriteAllowed(); + UnaryOpFlags flags = inline_smi_code + ? NO_UNARY_SMI_CODE_IN_STUB + : NO_UNARY_FLAGS; + UnaryOverwriteMode mode = + overwrite ? 
UNARY_OVERWRITE : UNARY_NO_OVERWRITE; + GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags); + __ CallStub(&stub); + __ bind(&done); + context()->Plug(r0); break; + } default: UNREACHABLE(); @@ -3772,23 +3668,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } -void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, - const char* comment) { - // TODO(svenpanne): Allowing format strings in Comment would be nice here... - Comment cmt(masm_, comment); - bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); - UnaryOverwriteMode overwrite = - can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - UnaryOpStub stub(expr->op(), overwrite); - // UnaryOpStub expects the argument to be in the - // accumulator register r0. - VisitForAccumulatorValue(expr->expression()); - SetSourcePosition(expr->position()); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); - context()->Plug(r0); -} - - void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Comment cmnt(masm_, "[ CountOperation"); SetSourcePosition(expr->position()); @@ -3801,7 +3680,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { } // Expression can only be a property, a global or a (parameter or local) - // slot. + // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; LhsKind assign_type = VARIABLE; Property* prop = expr->expression()->AsProperty(); @@ -3829,8 +3708,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ push(r0); EmitNamedPropertyLoad(prop); } else { - VisitForStackValue(prop->obj()); - VisitForAccumulatorValue(prop->key()); + if (prop->is_arguments_access()) { + VariableProxy* obj_proxy = prop->obj()->AsVariableProxy(); + __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0)); + __ push(r0); + __ mov(r0, Operand(prop->key()->AsLiteral()->handle())); + } else { + VisitForStackValue(prop->obj()); + VisitForAccumulatorValue(prop->key()); + } __ ldr(r1, MemOperand(sp, 0)); __ push(r0); EmitKeyedPropertyLoad(prop); @@ -3839,11 +3725,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // We need a second deoptimization point after loading the value // in case evaluating the property load my have a side effect. - if (assign_type == VARIABLE) { - PrepareForBailout(expr->expression(), TOS_REG); - } else { - PrepareForBailoutForId(expr->CountId(), TOS_REG); - } + PrepareForBailout(expr->increment(), TOS_REG); // Call ToNumber only if operand is not a smi. Label no_conversion; @@ -3894,9 +3776,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Record position before stub call. SetSourcePosition(expr->position()); - BinaryOpStub stub(Token::ADD, NO_OVERWRITE); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId()); - patch_site.EmitPatchInfo(); + TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE); + EmitCallIC(stub.GetCode(), &patch_site); __ bind(&done); // Store the value returned in r0. @@ -3924,10 +3805,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case NAMED_PROPERTY: { __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); __ pop(r1); - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); + Handle<Code> ic(Builtins::builtin( + is_strict() ? 
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { if (!context()->IsEffect()) { @@ -3941,10 +3822,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case KEYED_PROPERTY: { __ pop(r1); // Key. __ pop(r2); // Receiver. - Handle<Code> ic = is_strict_mode() - ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() - : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); + Handle<Code> ic(Builtins::builtin( + is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { if (!context()->IsEffect()) { @@ -3967,10 +3848,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { Comment cmnt(masm_, "Global variable"); __ ldr(r0, GlobalObjectOperand()); __ mov(r2, Operand(proxy->name())); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); // Use a regular load, not a contextual load, to avoid a reference // error. - __ Call(ic); + EmitCallIC(ic, RelocInfo::CODE_TARGET); PrepareForBailout(expr, TOS_REG); context()->Plug(r0); } else if (proxy != NULL && @@ -3993,83 +3874,104 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { context()->Plug(r0); } else { // This expression cannot throw a reference error at the top level. - VisitInCurrentContext(expr); + context()->HandleExpression(expr); } } -void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through) { +bool FullCodeGenerator::TryLiteralCompare(Token::Value op, + Expression* left, + Expression* right, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (op != Token::EQ && op != Token::EQ_STRICT) return false; + + // Check for the pattern: typeof <expression> == <string literal>. + Literal* right_literal = right->AsLiteral(); + if (right_literal == NULL) return false; + Handle<Object> right_literal_value = right_literal->handle(); + if (!right_literal_value->IsString()) return false; + UnaryOperation* left_unary = left->AsUnaryOperation(); + if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false; + Handle<String> check = Handle<String>::cast(right_literal_value); + { AccumulatorValueContext context(this); - VisitForTypeofValue(expr); + VisitForTypeofValue(left_unary->expression()); } PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_symbol())) { - __ JumpIfSmi(r0, if_true); + if (check->Equals(Heap::number_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_true); __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_symbol())) { - __ JumpIfSmi(r0, if_false); + } else if (check->Equals(Heap::string_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); // Check for undetectable objects => false. 
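Many hunks in this file swap between the __ JumpIfSmi macro and an explicit __ tst against kSmiTagMask followed by a conditional branch; both test the same tagged representation, where a small integer ("smi") is stored shifted left with a 0 tag bit and heap object pointers carry a 1. It also explains the BIT_NOT fast path above: mvn inverts the whole word and bic clears the now-set tag bit, leaving the smi encoding of the inverted integer. A compact sketch of the encoding these checks rely on (32-bit layout, as in this ARM port):

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTag = 0;        // low bit 0 => smi
    constexpr intptr_t kSmiTagSize = 1;
    constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;   // 0b1

    inline intptr_t SmiFromInt(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }
    inline int32_t SmiToInt(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiTagSize);
    }
    inline bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }

    int main() {
      intptr_t smi = SmiFromInt(42);
      assert(IsSmi(smi) && SmiToInt(smi) == 42);  // what tst + b(eq, ...) tests
      intptr_t heap_object = 0x1001;              // heap pointers carry tag bit 1
      assert(!IsSmi(heap_object));
      return 0;
    }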
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE); - __ b(ge, if_false); + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ tst(r1, Operand(1 << Map::kIsUndetectable)); - Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_symbol())) { - __ CompareRoot(r0, Heap::kTrueValueRootIndex); + __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); + __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + __ b(eq, if_false); + __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + __ cmp(r1, Operand(FIRST_NONSTRING_TYPE)); + Split(lt, if_true, if_false, fall_through); + } else if (check->Equals(Heap::boolean_symbol())) { + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(r0, ip); __ b(eq, if_true); - __ CompareRoot(r0, Heap::kFalseValueRootIndex); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_symbol())) { - __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); + } else if (check->Equals(Heap::undefined_symbol())) { + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r0, ip); __ b(eq, if_true); - __ JumpIfSmi(r0, if_false); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); // Check for undetectable objects => true. __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ tst(r1, Operand(1 << Map::kIsUndetectable)); - Split(ne, if_true, if_false, fall_through); - - } else if (check->Equals(isolate()->heap()->function_symbol())) { - __ JumpIfSmi(r0, if_false); - __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE); - Split(ge, if_true, if_false, fall_through); - - } else if (check->Equals(isolate()->heap()->object_symbol())) { - __ JumpIfSmi(r0, if_false); - __ CompareRoot(r0, Heap::kNullValueRootIndex); + __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); + __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); + } else if (check->Equals(Heap::function_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE); __ b(eq, if_true); + // Regular expressions => 'function' (they are callable). + __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE); + Split(eq, if_true, if_false, fall_through); + } else if (check->Equals(Heap::object_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(r0, ip); + __ b(eq, if_true); + // Regular expressions => 'function', not 'object'. + __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE); + __ b(eq, if_false); + // Check for undetectable objects => false. + __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset)); + __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); + __ cmp(r0, Operand(1 << Map::kIsUndetectable)); + __ b(eq, if_false); // Check for JS objects => true. - __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset)); + __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, if_false); - __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); - __ b(gt, if_false); - // Check for undetectable objects => false. 
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); - __ tst(r1, Operand(1 << Map::kIsUndetectable)); - Split(eq, if_true, if_false, fall_through); + __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE)); + Split(le, if_true, if_false, fall_through); } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); - Split(eq, if_true, if_false, fall_through); + return true; } @@ -4089,17 +3991,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // First we try a fast inlined version of the compare when one of // the operands is a literal. - if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { + Token::Value op = expr->op(); + Expression* left = expr->left(); + Expression* right = expr->right(); + if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) { context()->Plug(if_true, if_false); return; } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { case Token::IN: VisitForStackValue(expr->right()); - __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); + __ InvokeBuiltin(Builtins::IN, CALL_JS); PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL); __ LoadRoot(ip, Heap::kTrueValueRootIndex); __ cmp(r0, ip); @@ -4169,8 +4073,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // Record position and call the compare IC. SetSourcePosition(expr->position()); Handle<Code> ic = CompareIC::GetUninitialized(op); - __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); - patch_site.EmitPatchInfo(); + EmitCallIC(ic, &patch_site); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); __ cmp(r0, Operand(0)); Split(cond, if_true, if_false, fall_through); @@ -4203,7 +4106,8 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); __ cmp(r0, r1); __ b(eq, if_true); - __ JumpIfSmi(r0, if_false); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); // It can be an undetectable object. 
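TryLiteralCompare above specializes comparisons of the form typeof <expr> == "<string literal>": it dispatches on the literal and emits a tailored check (smi or heap-number map for "number", non-string instance type plus the undetectable bit for "string", and so on). The C++ sketch below mirrors that decision tree over a toy value model; the Value struct and its fields are stand-ins, not V8 types, and "callable" lumps functions and regexps together as this V8 version does:

    #include <string>

    // Toy value model standing in for tagged V8 values.
    struct Value {
      bool is_smi = false;
      bool is_heap_number = false;
      bool is_string = false;
      bool is_true_or_false = false;
      bool is_undefined = false;
      bool is_undetectable = false;   // e.g. document.all
      bool is_callable = false;       // JS functions and regexps here
      bool is_null = false;
      bool is_js_object = false;
    };

    // Mirrors the order of checks emitted for typeof <expr> == "<literal>".
    bool TypeofEquals(const Value& v, const std::string& check) {
      if (check == "number")    return v.is_smi || v.is_heap_number;
      if (check == "string")    return v.is_string && !v.is_undetectable;
      if (check == "boolean")   return v.is_true_or_false;
      if (check == "undefined") return v.is_undefined || v.is_undetectable;
      if (check == "function")  return v.is_callable;
      if (check == "object")
        return v.is_null ||
               (v.is_js_object && !v.is_callable && !v.is_undetectable);
      return false;   // any other literal can never match
    }

    int main() {
      Value fn;
      fn.is_callable = true;
      return TypeofEquals(fn, "function") ? 0 : 1;
    }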
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset)); @@ -4231,6 +4135,55 @@ Register FullCodeGenerator::context_register() { } +void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) { + ASSERT(mode == RelocInfo::CODE_TARGET || + mode == RelocInfo::CODE_TARGET_CONTEXT); + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1, r1, r2); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1, r1, r2); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2); + default: + break; + } + + __ Call(ic, mode); +} + + +void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) { + switch (ic->kind()) { + case Code::LOAD_IC: + __ IncrementCounter(&Counters::named_load_full, 1, r1, r2); + break; + case Code::KEYED_LOAD_IC: + __ IncrementCounter(&Counters::keyed_load_full, 1, r1, r2); + break; + case Code::STORE_IC: + __ IncrementCounter(&Counters::named_store_full, 1, r1, r2); + break; + case Code::KEYED_STORE_IC: + __ IncrementCounter(&Counters::keyed_store_full, 1, r1, r2); + default: + break; + } + + __ Call(ic, RelocInfo::CODE_TARGET); + if (patch_site != NULL && patch_site->is_bound()) { + patch_site->EmitPatchInfo(); + } else { + __ nop(); // Signals no inlined code. + } +} + + void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); __ str(value, MemOperand(fp, frame_offset)); @@ -4242,27 +4195,6 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) { } -void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { - Scope* declaration_scope = scope()->DeclarationScope(); - if (declaration_scope->is_global_scope()) { - // Contexts nested in the global context have a canonical empty function - // as their closure, not the anonymous closure containing the global - // code. Pass a smi sentinel and let the runtime look up the empty - // function. - __ mov(ip, Operand(Smi::FromInt(0))); - } else if (declaration_scope->is_eval_scope()) { - // Contexts created by a call to eval have the same closure as the - // context calling eval, not the anonymous closure containing the eval - // code. Fetch it from the context. - __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX)); - } else { - ASSERT(declaration_scope->is_function_scope()); - __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - } - __ push(ip); -} - - // ---------------------------------------------------------------------------- // Non-local control flow support. diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index dea875bad4..0fc6818703 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2006-2008 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -31,7 +31,7 @@ #include "assembler-arm.h" #include "code-stubs.h" -#include "codegen.h" +#include "codegen-inl.h" #include "disasm.h" #include "ic-inl.h" #include "runtime.h" @@ -79,14 +79,15 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, // elements map. 
// Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); // Check that the receiver is a valid JS object. - __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE); __ b(lt, miss); // If this assert fails, we have to check upper bound too. - STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); GenerateGlobalInstanceTypeCheck(masm, t1, miss); @@ -104,6 +105,65 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, } +// Probe the string dictionary in the |elements| register. Jump to the +// |done| label if a property with the given name is found. Jump to +// the |miss| label otherwise. +static void GenerateStringDictionaryProbes(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register scratch1, + Register scratch2) { + // Assert that name contains a string. + if (FLAG_debug_code) __ AbortIfNotString(name); + + // Compute the capacity mask. + const int kCapacityOffset = StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); + __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int + __ sub(scratch1, scratch1, Operand(1)); + + const int kElementsStartOffset = StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + + // Generate an unrolled loop that performs a few probes before + // giving up. Measurements done on Gmail indicate that 2 probes + // cover ~93% of loads from dictionaries. + static const int kProbes = 4; + for (int i = 0; i < kProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); + if (i > 0) { + // Add the probe offset (i + i * i) left shifted to avoid right shifting + // the hash in a separate instruction. The value hash + i + i * i is right + // shifted in the following and instruction. + ASSERT(StringDictionary::GetProbeOffset(i) < + 1 << (32 - String::kHashFieldOffset)); + __ add(scratch2, scratch2, Operand( + StringDictionary::GetProbeOffset(i) << String::kHashShift)); + } + __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift)); + + // Scale the index by multiplying by the element size. + ASSERT(StringDictionary::kEntrySize == 3); + // scratch2 = scratch2 * 3. + __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); + + // Check if the key is identical to the name. + __ add(scratch2, elements, Operand(scratch2, LSL, 2)); + __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); + __ cmp(name, Operand(ip)); + if (i != kProbes - 1) { + __ b(eq, done); + } else { + __ b(ne, miss); + } + } +} + + // Helper function used from LoadIC/CallIC GenerateNormal. // // elements: Property dictionary. It is not clobbered if a jump to the miss @@ -131,13 +191,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + GenerateStringDictionaryProbes(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); // If probing finds an entry check that the value is a normal // property. @@ -180,13 +240,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label done; // Probe the dictionary. 
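GenerateStringDictionaryProbes above is an unrolled open-addressing lookup: per its own comments, probe i inspects slot (hash + i + i*i) & mask, each entry occupies three words (kEntrySize == 3), and after four probes the generated code gives up and branches to the miss label, leaving correctness to the runtime. A plain C++ sketch of that probe sequence; the Entry layout and std::hash are illustrative stand-ins for the symbol hash field and dictionary layout:

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    struct Entry {
      std::string key;    // property name (symbol)
      int value = 0;
      int details = 0;    // third word per entry, matching kEntrySize == 3
    };

    // Quadratic probing with the same shape as the unrolled assembly loop:
    // probe i looks at slot (hash + i + i*i) & mask, i.e. offsets 0, 2, 6, 12.
    // Like the generated code, it gives up after kProbes and reports a miss.
    bool DictionaryProbe(const std::vector<Entry>& table,  // size: power of two
                         const std::string& name,
                         int* value_out) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      const uint32_t hash =
          static_cast<uint32_t>(std::hash<std::string>{}(name));
      static const int kProbes = 4;  // "2 probes cover ~93% of loads" above
      for (int i = 0; i < kProbes; ++i) {
        const Entry& entry = table[(hash + i + i * i) & mask];
        if (entry.key == name) {     // __ cmp(name, Operand(ip)) above
          *value_out = entry.value;
          return true;
        }
      }
      return false;
    }

    int main() {
      std::vector<Entry> table(8);
      uint32_t h = static_cast<uint32_t>(std::hash<std::string>{}("length"));
      table[h & 7] = Entry{"length", 123, 0};   // found on the first probe
      int v = 0;
      return DictionaryProbe(table, "length", &v) && v == 123 ? 0 : 1;
    }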
- StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + GenerateStringDictionaryProbes(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); // If probing finds an entry in the dictionary check that the value // is a normal property that is not read only. @@ -478,8 +538,7 @@ Object* CallIC_Miss(Arguments args); // The generated code falls through if both probes miss. static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, int argc, - Code::Kind kind, - Code::ExtraICState extra_ic_state) { + Code::Kind kind) { // ----------- S t a t e ------------- // -- r1 : receiver // -- r2 : name @@ -490,11 +549,10 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, Code::Flags flags = Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, - extra_ic_state, + Code::kNoExtraICState, NORMAL, argc); - Isolate::Current()->stub_cache()->GenerateProbe( - masm, flags, r1, r2, r3, r4, r5); + StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); // If the stub cache probing failed, the receiver might be a value. // For value objects, we use the map of the prototype objects for @@ -502,7 +560,8 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, // to probe. // // Check for number. - __ JumpIfSmi(r1, &number); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &number); __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE); __ b(ne, &non_number); __ bind(&number); @@ -532,8 +591,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm, // Probe the stub cache for the value object. __ bind(&probe); - Isolate::Current()->stub_cache()->GenerateProbe( - masm, flags, r1, r2, r3, r4, r5); + StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); __ bind(&miss); } @@ -546,7 +604,8 @@ static void GenerateFunctionTailCall(MacroAssembler* masm, // r1: function // Check that the value isn't a smi. - __ JumpIfSmi(r1, miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, miss); // Check that the value is a JSFunction. __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE); @@ -554,8 +613,7 @@ static void GenerateFunctionTailCall(MacroAssembler* masm, // Invoke the function. ParameterCount actual(argc); - __ InvokeFunction(r1, actual, JUMP_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeFunction(r1, actual, JUMP_FUNCTION); } @@ -581,20 +639,16 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) { } -static void GenerateCallMiss(MacroAssembler* masm, - int argc, - IC::UtilityId id, - Code::ExtraICState extra_ic_state) { +static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // ----------------------------------- - Isolate* isolate = masm->isolate(); if (id == IC::kCallIC_Miss) { - __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4); + __ IncrementCounter(&Counters::call_miss, 1, r3, r4); } else { - __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4); + __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4); } // Get the receiver of the function from the stack. @@ -607,7 +661,7 @@ static void GenerateCallMiss(MacroAssembler* masm, // Call the entry. 
__ mov(r0, Operand(2)); - __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); + __ mov(r1, Operand(ExternalReference(IC_Utility(id)))); CEntryStub stub(1); __ CallStub(&stub); @@ -621,7 +675,8 @@ static void GenerateCallMiss(MacroAssembler* masm, if (id == IC::kCallIC_Miss) { Label invoke, global; __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver - __ JumpIfSmi(r2, &invoke); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &invoke); __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE); __ b(eq, &global); __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE)); @@ -635,33 +690,22 @@ static void GenerateCallMiss(MacroAssembler* masm, } // Invoke the function. - CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state) - ? CALL_AS_FUNCTION - : CALL_AS_METHOD; ParameterCount actual(argc); - __ InvokeFunction(r1, - actual, - JUMP_FUNCTION, - NullCallWrapper(), - call_kind); + __ InvokeFunction(r1, actual, JUMP_FUNCTION); } -void CallIC::GenerateMiss(MacroAssembler* masm, - int argc, - Code::ExtraICState extra_ic_state) { +void CallIC::GenerateMiss(MacroAssembler* masm, int argc) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // ----------------------------------- - GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state); + GenerateCallMiss(masm, argc, IC::kCallIC_Miss); } -void CallIC::GenerateMegamorphic(MacroAssembler* masm, - int argc, - Code::ExtraICState extra_ic_state) { +void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address @@ -669,8 +713,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, // Get the receiver of the function from the stack into r1. __ ldr(r1, MemOperand(sp, argc * kPointerSize)); - GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state); - GenerateMiss(masm, argc, extra_ic_state); + GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC); + GenerateMiss(masm, argc); } @@ -681,7 +725,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) { // ----------------------------------- GenerateCallNormal(masm, argc); - GenerateMiss(masm, argc, Code::kNoExtraICState); + GenerateMiss(masm, argc); } @@ -691,7 +735,7 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) { // -- lr : return address // ----------------------------------- - GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState); + GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss); } @@ -719,8 +763,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { GenerateFastArrayLoad( masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3); + __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3); __ bind(&do_call); // receiver in r1 is not used after this point. @@ -739,13 +782,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ mov(r0, Operand(r2, ASR, kSmiTagSize)); // r0: untagged index GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5); - __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3); + __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3); __ jmp(&do_call); __ bind(&slow_load); // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. 
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); + __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3); __ EnterInternalFrame(); __ push(r2); // save the key __ Push(r1, r2); // pass the receiver and the key @@ -772,15 +815,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ b(ne, &lookup_monomorphic_cache); GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4); - __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3); + __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3); __ jmp(&do_call); __ bind(&lookup_monomorphic_cache); - __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3); - GenerateMonomorphicCacheProbe(masm, - argc, - Code::KEYED_CALL_IC, - Code::kNoExtraICState); + __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3); + GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC); // Fall through on miss. __ bind(&slow_call); @@ -790,7 +830,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // - the value loaded is not a function, // - there is hope that the runtime will create a monomorphic call stub // that will get fetched next time. - __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3); + __ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3); GenerateMiss(masm, argc); __ bind(&index_string); @@ -808,7 +848,8 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // Check if the name is a string. Label miss; - __ JumpIfSmi(r2, &miss); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &miss); __ IsObjectJSStringType(r2, r0, &miss); GenerateCallNormal(masm, argc); @@ -832,8 +873,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, NOT_IN_LOOP, MONOMORPHIC); - Isolate::Current()->stub_cache()->GenerateProbe( - masm, flags, r0, r2, r3, r4, r5); + StubCache::GenerateProbe(masm, flags, r0, r2, r3, r4, r5); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -868,205 +908,244 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { // -- r0 : receiver // -- sp[0] : receiver // ----------------------------------- - Isolate* isolate = masm->isolate(); - __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4); + __ IncrementCounter(&Counters::load_miss, 1, r3, r4); __ mov(r3, r0); __ Push(r3, r2); // Perform tail call to the entry. - ExternalReference ref = - ExternalReference(IC_Utility(kLoadIC_Miss), isolate); + ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss)); __ TailCallExternalReference(ref, 2, 1); } +// Returns the code marker, or the 0 if the code is not marked. +static inline int InlinedICSiteMarker(Address address, + Address* inline_end_address) { + if (V8::UseCrankshaft()) return false; + + // If the instruction after the call site is not the pseudo instruction nop1 + // then this is not related to an inlined in-object property load. The nop1 + // instruction is located just after the call to the IC in the deferred code + // handling the miss in the inlined code. After the nop1 instruction there is + // a branch instruction for jumping back from the deferred code. + Address address_after_call = address + Assembler::kCallTargetAddressOffset; + Instr instr_after_call = Assembler::instr_at(address_after_call); + int code_marker = MacroAssembler::GetCodeMarker(instr_after_call); + + // A negative result means the code is not marked. 
+ if (code_marker <= 0) return 0; + + Address address_after_nop = address_after_call + Assembler::kInstrSize; + Instr instr_after_nop = Assembler::instr_at(address_after_nop); + // There may be some reg-reg move and frame merging code to skip over before + // the branch back from the DeferredReferenceGetKeyedValue code to the inlined + // code. + while (!Assembler::IsBranch(instr_after_nop)) { + address_after_nop += Assembler::kInstrSize; + instr_after_nop = Assembler::instr_at(address_after_nop); + } + + // Find the end of the inlined code for handling the load. + int b_offset = + Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta; + ASSERT(b_offset < 0); // Jumping back from deferred code. + *inline_end_address = address_after_nop + b_offset; -static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, - Register object, - Register key, - Register scratch1, - Register scratch2, - Register scratch3, - Label* unmapped_case, - Label* slow_case) { - Heap* heap = masm->isolate()->heap(); - - // Check that the receiver is a JSObject. Because of the map check - // later, we do not need to check for interceptors or whether it - // requires access checks. - __ JumpIfSmi(object, slow_case); - // Check that the object is some kind of JSObject. - __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); - __ b(lt, slow_case); - - // Check that the key is a positive smi. - __ tst(key, Operand(0x8000001)); - __ b(ne, slow_case); - - // Load the elements into scratch1 and check its map. - Handle<Map> arguments_map(heap->non_strict_arguments_elements_map()); - __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); - __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); - - // Check if element is in the range of mapped arguments. If not, jump - // to the unmapped lookup with the parameter map in scratch1. - __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); - __ sub(scratch2, scratch2, Operand(Smi::FromInt(2))); - __ cmp(key, Operand(scratch2)); - __ b(cs, unmapped_case); - - // Load element index and check whether it is the hole. - const int kOffset = - FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; - - __ mov(scratch3, Operand(kPointerSize >> 1)); - __ mul(scratch3, key, scratch3); - __ add(scratch3, scratch3, Operand(kOffset)); - - __ ldr(scratch2, MemOperand(scratch1, scratch3)); - __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); - __ cmp(scratch2, scratch3); - __ b(eq, unmapped_case); - - // Load value from context and return it. We can reuse scratch1 because - // we do not jump to the unmapped lookup (which requires the parameter - // map in scratch1). - __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ mov(scratch3, Operand(kPointerSize >> 1)); - __ mul(scratch3, scratch2, scratch3); - __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); - return MemOperand(scratch1, scratch3); + return code_marker; } -static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, - Register key, - Register parameter_map, - Register scratch, - Label* slow_case) { - // Element is in arguments backing store, which is referenced by the - // second element of the parameter_map. The parameter_map register - // must be loaded with the parameter map of the arguments object and is - // overwritten. 
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; - Register backing_store = parameter_map; - __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); - Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); - __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, - DONT_DO_SMI_CHECK); - __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); - __ cmp(key, Operand(scratch)); - __ b(cs, slow_case); - __ mov(scratch, Operand(kPointerSize >> 1)); - __ mul(scratch, key, scratch); - __ add(scratch, - scratch, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - return MemOperand(backing_store, scratch); +bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) { + if (V8::UseCrankshaft()) return false; + + // Find the end of the inlined code for handling the load if this is an + // inlined IC call site. + Address inline_end_address; + if (InlinedICSiteMarker(address, &inline_end_address) + != Assembler::PROPERTY_ACCESS_INLINED) { + return false; + } + + // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]). + // The immediate must be representable in 12 bits. + ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12)); + Address ldr_property_instr_address = + inline_end_address - Assembler::kInstrSize; + ASSERT(Assembler::IsLdrRegisterImmediate( + Assembler::instr_at(ldr_property_instr_address))); + Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address); + ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset( + ldr_property_instr, offset - kHeapObjectTag); + Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr); + + // Indicate that code has changed. + CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize); + + // Patch the map check. + // For PROPERTY_ACCESS_INLINED, the load map instruction is generated + // 4 instructions before the end of the inlined code. + // See codgen-arm.cc CodeGenerator::EmitNamedLoad. + int ldr_map_offset = -4; + Address ldr_map_instr_address = + inline_end_address + ldr_map_offset * Assembler::kInstrSize; + Assembler::set_target_address_at(ldr_map_instr_address, + reinterpret_cast<Address>(map)); + return true; } -void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label slow, notin; - MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, ¬in, &slow); - __ ldr(r0, mapped_location); - __ Ret(); - __ bind(¬in); - // The unmapped lookup expects that the parameter map is in r2. - MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow); - __ ldr(r2, unmapped_location); - __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); - __ cmp(r2, r3); - __ b(eq, &slow); - __ mov(r0, r2); - __ Ret(); - __ bind(&slow); - GenerateMiss(masm, false); +bool LoadIC::PatchInlinedContextualLoad(Address address, + Object* map, + Object* cell, + bool is_dont_delete) { + // Find the end of the inlined code for handling the contextual load if + // this is inlined IC call site. 
+ Address inline_end_address; + int marker = InlinedICSiteMarker(address, &inline_end_address); + if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) || + (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) { + return false; + } + // On ARM we don't rely on the is_dont_delete argument as the hint is already + // embedded in the code marker. + bool marker_is_dont_delete = + marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE; + + // These are the offsets from the end of the inlined code. + // See codgen-arm.cc CodeGenerator::EmitNamedLoad. + int ldr_map_offset = marker_is_dont_delete ? -5: -8; + int ldr_cell_offset = marker_is_dont_delete ? -2: -5; + if (FLAG_debug_code && marker_is_dont_delete) { + // Three extra instructions were generated to check for the_hole_value. + ldr_map_offset -= 3; + ldr_cell_offset -= 3; + } + Address ldr_map_instr_address = + inline_end_address + ldr_map_offset * Assembler::kInstrSize; + Address ldr_cell_instr_address = + inline_end_address + ldr_cell_offset * Assembler::kInstrSize; + + // Patch the map check. + Assembler::set_target_address_at(ldr_map_instr_address, + reinterpret_cast<Address>(map)); + // Patch the cell address. + Assembler::set_target_address_at(ldr_cell_instr_address, + reinterpret_cast<Address>(cell)); + + return true; } -void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - Label slow, notin; - MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, ¬in, &slow); - __ str(r0, mapped_location); - __ add(r6, r3, r5); - __ RecordWrite(r3, r6, r9); - __ Ret(); - __ bind(¬in); - // The unmapped lookup expects that the parameter map is in r3. - MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); - __ str(r0, unmapped_location); - __ add(r6, r3, r4); - __ RecordWrite(r3, r6, r9); - __ Ret(); - __ bind(&slow); - GenerateMiss(masm, false); +bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { + if (V8::UseCrankshaft()) return false; + + // Find the end of the inlined code for the store if there is an + // inlined version of the store. + Address inline_end_address; + if (InlinedICSiteMarker(address, &inline_end_address) + != Assembler::PROPERTY_ACCESS_INLINED) { + return false; + } + + // Compute the address of the map load instruction. + Address ldr_map_instr_address = + inline_end_address - + (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() * + Assembler::kInstrSize); + + // Update the offsets if initializing the inlined store. No reason + // to update the offsets when clearing the inlined version because + // it will bail out in the map check. + if (map != Heap::null_value()) { + // Patch the offset in the actual store instruction. + Address str_property_instr_address = + ldr_map_instr_address + 3 * Assembler::kInstrSize; + Instr str_property_instr = Assembler::instr_at(str_property_instr_address); + ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr)); + str_property_instr = Assembler::SetStrRegisterImmediateOffset( + str_property_instr, offset - kHeapObjectTag); + Assembler::instr_at_put(str_property_instr_address, str_property_instr); + + // Patch the offset in the add instruction that is part of the + // write barrier. 
+ Address add_offset_instr_address = + str_property_instr_address + Assembler::kInstrSize; + Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address); + ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr)); + add_offset_instr = Assembler::SetAddRegisterImmediateOffset( + add_offset_instr, offset - kHeapObjectTag); + Assembler::instr_at_put(add_offset_instr_address, add_offset_instr); + + // Indicate that code has changed. + CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize); + } + + // Patch the map check. + Assembler::set_target_address_at(ldr_map_instr_address, + reinterpret_cast<Address>(map)); + + return true; } -void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm, - int argc) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label slow, notin; - // Load receiver. - __ ldr(r1, MemOperand(sp, argc * kPointerSize)); - MemOperand mapped_location = - GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, ¬in, &slow); - __ ldr(r1, mapped_location); - GenerateFunctionTailCall(masm, argc, &slow, r3); - __ bind(¬in); - // The unmapped lookup expects that the parameter map is in r3. - MemOperand unmapped_location = - GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow); - __ ldr(r1, unmapped_location); - __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); - __ cmp(r1, r3); - __ b(eq, &slow); - GenerateFunctionTailCall(masm, argc, &slow, r3); - __ bind(&slow); - GenerateMiss(masm, argc); +bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { + if (V8::UseCrankshaft()) return false; + + Address inline_end_address; + if (InlinedICSiteMarker(address, &inline_end_address) + != Assembler::PROPERTY_ACCESS_INLINED) { + return false; + } + + // Patch the map check. + Address ldr_map_instr_address = + inline_end_address - + (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() * + Assembler::kInstrSize); + Assembler::set_target_address_at(ldr_map_instr_address, + reinterpret_cast<Address>(map)); + return true; +} + + +bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { + if (V8::UseCrankshaft()) return false; + + // Find the end of the inlined code for handling the store if this is an + // inlined IC call site. + Address inline_end_address; + if (InlinedICSiteMarker(address, &inline_end_address) + != Assembler::PROPERTY_ACCESS_INLINED) { + return false; + } + + // Patch the map check. + Address ldr_map_instr_address = + inline_end_address - + (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch * + Assembler::kInstrSize); + Assembler::set_target_address_at(ldr_map_instr_address, + reinterpret_cast<Address>(map)); + return true; } Object* KeyedLoadIC_Miss(Arguments args); -void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key // -- r1 : receiver // ----------------------------------- - Isolate* isolate = masm->isolate(); - __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4); + __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4); __ Push(r1, r0); - // Perform tail call to the entry. - ExternalReference ref = force_generic - ? 
ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate) - : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); - + ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); __ TailCallExternalReference(ref, 2, 1); } @@ -1091,13 +1170,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- r1 : receiver // ----------------------------------- Label slow, check_string, index_smi, index_string, property_array_property; - Label probe_dictionary, check_number_dictionary; + Label check_pixel_array, probe_dictionary, check_number_dictionary; Register key = r0; Register receiver = r1; - Isolate* isolate = masm->isolate(); - // Check that the key is a smi. __ JumpIfNotSmi(key, &check_string); __ bind(&index_smi); @@ -1107,18 +1184,35 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateKeyedLoadReceiverCheck( masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow); - // Check the receiver's map to see if it has fast elements. - __ CheckFastElements(r2, r3, &check_number_dictionary); + // Check the "has fast elements" bit in the receiver's map which is + // now in r2. + __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset)); + __ tst(r3, Operand(1 << Map::kHasFastElements)); + __ b(eq, &check_pixel_array); GenerateFastArrayLoad( masm, receiver, key, r4, r3, r2, r0, NULL, &slow); - __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3); __ Ret(); - __ bind(&check_number_dictionary); - __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset)); + // Check whether the elements is a pixel array. + // r0: key + // r1: receiver + __ bind(&check_pixel_array); + + GenerateFastPixelArrayLoad(masm, + r1, + r0, + r3, + r4, + r2, + r5, + r0, + &check_number_dictionary, + NULL, + &slow); + __ bind(&check_number_dictionary); // Check whether the elements is a number dictionary. // r0: key // r3: elements map @@ -1132,8 +1226,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Slow case, key and receiver still in r0 and r1. __ bind(&slow); - __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), - 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3); GenerateRuntimeGetProperty(masm); __ bind(&check_string); @@ -1160,8 +1253,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load the key (consisting of map and symbol) from the cache and // check for match. - ExternalReference cache_keys = - ExternalReference::keyed_lookup_cache_keys(isolate); + ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys(); __ mov(r4, Operand(cache_keys)); __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1)); __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol. @@ -1176,8 +1268,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // r1 : receiver // r2 : receiver's map // r3 : lookup cache index - ExternalReference cache_field_offsets = - ExternalReference::keyed_lookup_cache_field_offsets(isolate); + ExternalReference cache_field_offsets + = ExternalReference::keyed_lookup_cache_field_offsets(); __ mov(r4, Operand(cache_field_offsets)); __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset)); @@ -1189,8 +1281,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ add(r6, r6, r5); // Index from start of object. 
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag. __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2)); - __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3); __ Ret(); // Load property array property. @@ -1198,8 +1289,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset)); __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2)); - __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3); __ Ret(); // Do a quick inline probe of the receiver's dictionary, if it @@ -1213,8 +1303,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateGlobalInstanceTypeCheck(masm, r2, &slow); // Load the property to r0. GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4); - __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), - 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3); __ Ret(); __ bind(&index_string); @@ -1254,7 +1343,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { char_at_generator.GenerateSlow(masm, call_helper); __ bind(&miss); - GenerateMiss(masm, false); + GenerateMiss(masm); } @@ -1287,37 +1376,15 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { __ Push(r1, r0); // Receiver, key. // Perform tail call to the entry. - __ TailCallExternalReference( - ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor), - masm->isolate()), - 2, - 1); + __ TailCallExternalReference(ExternalReference( + IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1); __ bind(&slow); - GenerateMiss(masm, false); -} - - -void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { - // ---------- S t a t e -------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - - // Push receiver, key and value for runtime call. - __ Push(r2, r1, r0); - - ExternalReference ref = force_generic - ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric), - masm->isolate()) - : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); - __ TailCallExternalReference(ref, 3, 1); + GenerateMiss(masm); } -void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key @@ -1328,10 +1395,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { // Push receiver, key and value for runtime call. __ Push(r2, r1, r0); - // The slow case calls into the runtime to complete the store without causing - // an IC miss that would otherwise cause a transition to the generic stub. - ExternalReference ref = - ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); + ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss)); __ TailCallExternalReference(ref, 3, 1); } @@ -1364,7 +1428,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, fast, array, extra; + Label slow, fast, array, extra, check_pixel_array; // Register usage. 
Register value = r0; @@ -1374,9 +1438,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // r4 and r5 are used as general scratch registers. // Check that the key is a smi. - __ JumpIfNotSmi(key, &slow); + __ tst(key, Operand(kSmiTagMask)); + __ b(ne, &slow); // Check that the object isn't a smi. - __ JumpIfSmi(receiver, &slow); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, &slow); // Get the map of the object. __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need @@ -1388,13 +1454,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ cmp(r4, Operand(JS_ARRAY_TYPE)); __ b(eq, &array); - // Check that the object is some kind of JSObject. - __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE)); + // Check that the object is some kind of JS object. + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, &slow); - __ cmp(r4, Operand(JS_PROXY_TYPE)); - __ b(eq, &slow); - __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); - __ b(eq, &slow); // Object case: Check key against length in the elements array. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -1402,7 +1464,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(r4, ip); - __ b(ne, &slow); + __ b(ne, &check_pixel_array); // Check array bounds. Both the key and the length of FixedArray are smis. __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); @@ -1416,6 +1478,24 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // r2: receiver. GenerateRuntimeSetProperty(masm, strict_mode); + // Check whether the elements is a pixel array. + // r4: elements map. + __ bind(&check_pixel_array); + GenerateFastPixelArrayStore(masm, + r2, + r1, + r0, + elements, + r4, + r5, + r6, + false, + false, + NULL, + &slow, + &slow, + &slow); + // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one // element to the array by writing to array[array.length]. @@ -1479,9 +1559,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, NOT_IN_LOOP, MONOMORPHIC, strict_mode); - - Isolate::Current()->stub_cache()->GenerateProbe( - masm, flags, r1, r2, r3, r4, r5); + StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1499,8 +1577,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { __ Push(r1, r2, r0); // Perform tail call to the entry. - ExternalReference ref = - ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); + ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss)); __ TailCallExternalReference(ref, 3, 1); } @@ -1545,8 +1622,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) { // Prepare tail call to StoreIC_ArrayLength. 
__ Push(receiver, value); - ExternalReference ref = - ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate()); + ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength)); __ TailCallExternalReference(ref, 2, 1); __ bind(&miss); @@ -1567,13 +1643,11 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss); GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->store_normal_hit(), - 1, r4, r5); + __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5); __ Ret(); __ bind(&miss); - __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5); + __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5); GenerateMiss(masm); } diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc new file mode 100644 index 0000000000..df370c4437 --- /dev/null +++ b/deps/v8/src/arm/jump-target-arm.cc @@ -0,0 +1,174 @@ +// Copyright 2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_ARM) + +#include "codegen-inl.h" +#include "jump-target-inl.h" +#include "register-allocator-inl.h" +#include "virtual-frame-inl.h" + +namespace v8 { +namespace internal { + +// ------------------------------------------------------------------------- +// JumpTarget implementation. + +#define __ ACCESS_MASM(cgen()->masm()) + +void JumpTarget::DoJump() { + ASSERT(cgen()->has_valid_frame()); + // Live non-frame registers are not allowed at unconditional jumps + // because we have no way of invalidating the corresponding results + // which are still live in the C++ code. + ASSERT(cgen()->HasValidEntryRegisters()); + + if (entry_frame_set_) { + if (entry_label_.is_bound()) { + // If we already bound and generated code at the destination then it + // is too late to ask for less optimistic type assumptions. 
+ ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame())); + } + // There already a frame expectation at the target. + cgen()->frame()->MergeTo(&entry_frame_); + cgen()->DeleteFrame(); + } else { + // Clone the current frame to use as the expected one at the target. + set_entry_frame(cgen()->frame()); + // Zap the fall-through frame since the jump was unconditional. + RegisterFile empty; + cgen()->SetFrame(NULL, &empty); + } + if (entry_label_.is_bound()) { + // You can't jump backwards to an already bound label unless you admitted + // up front that this was a bidirectional jump target. Bidirectional jump + // targets will zap their type info when bound in case some later virtual + // frame with less precise type info branches to them. + ASSERT(direction_ != FORWARD_ONLY); + } + __ jmp(&entry_label_); +} + + +void JumpTarget::DoBranch(Condition cond, Hint ignored) { + ASSERT(cgen()->has_valid_frame()); + + if (entry_frame_set_) { + if (entry_label_.is_bound()) { + // If we already bound and generated code at the destination then it + // is too late to ask for less optimistic type assumptions. + ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame())); + } + // We have an expected frame to merge to on the backward edge. + cgen()->frame()->MergeTo(&entry_frame_, cond); + } else { + // Clone the current frame to use as the expected one at the target. + set_entry_frame(cgen()->frame()); + } + if (entry_label_.is_bound()) { + // You can't branch backwards to an already bound label unless you admitted + // up front that this was a bidirectional jump target. Bidirectional jump + // targets will zap their type info when bound in case some later virtual + // frame with less precise type info branches to them. + ASSERT(direction_ != FORWARD_ONLY); + } + __ b(cond, &entry_label_); + if (cond == al) { + cgen()->DeleteFrame(); + } +} + + +void JumpTarget::Call() { + // Call is used to push the address of the catch block on the stack as + // a return address when compiling try/catch and try/finally. We + // fully spill the frame before making the call. The expected frame + // at the label (which should be the only one) is the spilled current + // frame plus an in-memory return address. The "fall-through" frame + // at the return site is the spilled current frame. + ASSERT(cgen()->has_valid_frame()); + // There are no non-frame references across the call. + ASSERT(cgen()->HasValidEntryRegisters()); + ASSERT(!is_linked()); + + // Calls are always 'forward' so we use a copy of the current frame (plus + // one for a return address) as the expected frame. + ASSERT(!entry_frame_set_); + VirtualFrame target_frame = *cgen()->frame(); + target_frame.Adjust(1); + set_entry_frame(&target_frame); + + __ bl(&entry_label_); +} + + +void JumpTarget::DoBind() { + ASSERT(!is_bound()); + + // Live non-frame registers are not allowed at the start of a basic + // block. + ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters()); + + if (cgen()->has_valid_frame()) { + if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo(); + // If there is a current frame we can use it on the fall through. + if (!entry_frame_set_) { + entry_frame_ = *cgen()->frame(); + entry_frame_set_ = true; + } else { + cgen()->frame()->MergeTo(&entry_frame_); + // On fall through we may have to merge both ways. + if (direction_ != FORWARD_ONLY) { + // This will not need to adjust the virtual frame entries that are + // register allocated since that was done above and they now match. 
+ // But it does need to adjust the entry_frame_ of this jump target + // to make it potentially less optimistic. Later code can branch back + // to this jump target and we need to assert that that code does not + // have weaker assumptions about types. + entry_frame_.MergeTo(cgen()->frame()); + } + } + } else { + // If there is no current frame we must have an entry frame which we can + // copy. + ASSERT(entry_frame_set_); + RegisterFile empty; + cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty); + } + + __ bind(&entry_label_); +} + + +#undef __ + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 63e3169219..c04e5ca8e7 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -25,8 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" - #include "lithium-allocator-inl.h" #include "arm/lithium-arm.h" #include "arm/lithium-codegen-arm.h" @@ -61,21 +59,22 @@ void LOsrEntry::MarkSpilledRegister(int allocation_index, #ifdef DEBUG void LInstruction::VerifyCall() { - // Call instructions can use only fixed registers as temporaries and - // outputs because all registers are blocked by the calling convention. - // Inputs operands must use a fixed register or use-at-start policy or - // a non-register policy. + // Call instructions can use only fixed registers as + // temporaries and outputs because all registers + // are blocked by the calling convention. + // Inputs must use a fixed register. ASSERT(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() || !LUnallocated::cast(Output())->HasRegisterPolicy()); - for (UseIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() || - operand->IsUsedAtStart()); + for (UseIterator it(this); it.HasNext(); it.Advance()) { + LOperand* operand = it.Next(); + ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() || + !LUnallocated::cast(operand)->HasRegisterPolicy()); } - for (TempIterator it(this); !it.Done(); it.Advance()) { - LUnallocated* operand = LUnallocated::cast(it.Current()); - ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy()); + for (TempIterator it(this); it.HasNext(); it.Advance()) { + LOperand* operand = it.Next(); + ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() || + !LUnallocated::cast(operand)->HasRegisterPolicy()); } } #endif @@ -111,18 +110,21 @@ void LInstruction::PrintTo(StringStream* stream) { template<int R, int I, int T> void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) { stream->Add("= "); - for (int i = 0; i < inputs_.length(); i++) { - if (i > 0) stream->Add(" "); - inputs_[i]->PrintTo(stream); - } + inputs_.PrintOperandsTo(stream); } template<int R, int I, int T> void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) { - for (int i = 0; i < results_.length(); i++) { + results_.PrintOperandsTo(stream); +} + + +template<typename T, int N> +void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) { + for (int i = 0; i < N; i++) { if (i > 0) stream->Add(" "); - results_[i]->PrintTo(stream); + elems_[i]->PrintTo(stream); } } @@ -147,7 +149,7 @@ bool LGap::IsRedundant() const { } -void LGap::PrintDataTo(StringStream* stream) { +void LGap::PrintDataTo(StringStream* stream) const { for (int i = 0; i < 4; i++) { 
stream->Add("("); if (parallel_moves_[i] != NULL) { @@ -234,13 +236,6 @@ void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { } -void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { - stream->Add("if is_undetectable("); - InputAt(0)->PrintTo(stream); - stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); -} - - void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if has_instance_type("); InputAt(0)->PrintTo(stream); @@ -265,6 +260,12 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { } +void LTypeofIs::PrintDataTo(StringStream* stream) { + InputAt(0)->PrintTo(stream); + stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString()); +} + + void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if typeof "); InputAt(0)->PrintTo(stream); @@ -298,13 +299,6 @@ void LStoreContextSlot::PrintDataTo(StringStream* stream) { } -void LInvokeFunction::PrintDataTo(StringStream* stream) { - stream->Add("= "); - InputAt(0)->PrintTo(stream); - stream->Add(" #%d / ", arity()); -} - - void LCallKeyed::PrintDataTo(StringStream* stream) { stream->Add("[r2] #%d / ", arity()); } @@ -334,6 +328,13 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LClassOfTest::PrintDataTo(StringStream* stream) { + stream->Add("= class_of_test("); + InputAt(0)->PrintTo(stream); + stream->Add(", \"%o\")", *hydrogen()->class_name()); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); @@ -381,9 +382,8 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { } -LChunk::LChunk(CompilationInfo* info, HGraph* graph) +LChunk::LChunk(HGraph* graph) : spill_slot_count_(0), - info_(info), graph_(graph), instructions_(32), pointer_maps_(8), @@ -420,7 +420,8 @@ void LChunk::MarkEmptyBlocks() { LLabel* label = LLabel::cast(first_instr); if (last_instr->IsGoto()) { LGoto* goto_instr = LGoto::cast(last_instr); - if (label->IsRedundant() && + if (!goto_instr->include_stack_check() && + label->IsRedundant() && !label->is_loop_header()) { bool can_eliminate = true; for (int i = first + 1; i < last && can_eliminate; ++i) { @@ -445,7 +446,7 @@ void LChunk::MarkEmptyBlocks() { void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { - LInstructionGap* gap = new LInstructionGap(block); + LGap* gap = new LGap(block); int index = -1; if (instr->IsControl()) { instructions_.Add(gap); @@ -473,7 +474,7 @@ int LChunk::GetParameterStackSlot(int index) const { // shift all parameter indexes down by the number of parameters, and // make sure they end up negative so they are distinguishable from // spill slots. - int result = index - info()->scope()->num_parameters() - 1; + int result = index - graph()->info()->scope()->num_parameters() - 1; ASSERT(result < 0); return result; } @@ -481,7 +482,7 @@ int LChunk::GetParameterStackSlot(int index) const { // A parameter relative to ebp in the arguments stub. int LChunk::ParameterAt(int index) { ASSERT(-1 <= index); // -1 is the receiver. 
- return (1 + info()->scope()->num_parameters() - index) * + return (1 + graph()->info()->scope()->num_parameters() - index) * kPointerSize; } @@ -520,7 +521,7 @@ Representation LChunk::LookupLiteralRepresentation( LChunk* LChunkBuilder::Build() { ASSERT(is_unused()); - chunk_ = new LChunk(info(), graph()); + chunk_ = new LChunk(graph()); HPhase phase("Building chunk", chunk_); status_ = BUILDING; const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); @@ -537,8 +538,8 @@ LChunk* LChunkBuilder::Build() { void LChunkBuilder::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); - PrintF("Aborting LChunk building in @\"%s\": ", *name); + SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); + PrintF("Aborting LChunk building in @\"%s\": ", *debug_name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -791,11 +792,6 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { } -LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) { - return AssignEnvironment(new LDeoptimize); -} - - LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { return AssignEnvironment(new LDeoptimize); } @@ -809,7 +805,7 @@ LInstruction* LChunkBuilder::DoBit(Token::Value op, LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); - return DefineAsRegister(new LBitI(op, left, right)); + return DefineSameAsFirst(new LBitI(op, left, right)); } else { ASSERT(instr->representation().IsTagged()); ASSERT(instr->left()->representation().IsTagged()); @@ -848,25 +844,27 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, right = chunk_->DefineConstantOperand(constant); constant_value = constant->Integer32Value() & 0x1f; } else { - right = UseRegisterAtStart(right_value); + right = UseRegister(right_value); } // Shift operations can only deoptimize if we do a logical shift // by 0 and the result cannot be truncated to int32. - bool may_deopt = (op == Token::SHR && constant_value == 0); - bool does_deopt = false; - if (may_deopt) { - for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) { - if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) { - does_deopt = true; + bool can_deopt = (op == Token::SHR && constant_value == 0); + if (can_deopt) { + bool can_truncate = true; + for (int i = 0; i < instr->uses()->length(); i++) { + if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) { + can_truncate = false; break; } } + can_deopt = !can_truncate; } LInstruction* result = - DefineAsRegister(new LShiftI(op, left, right, does_deopt)); - return does_deopt ? 
AssignEnvironment(result) : result; + DefineSameAsFirst(new LShiftI(op, left, right, can_deopt)); + if (can_deopt) AssignEnvironment(result); + return result; } @@ -875,11 +873,10 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); - ASSERT(op != Token::MOD); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); LArithmeticD* result = new LArithmeticD(op, left, right); - return DefineAsRegister(result); + return DefineSameAsFirst(result); } @@ -977,7 +974,18 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - instr->set_hydrogen_value(current); + if (current->IsTest() && !instr->IsGoto()) { + ASSERT(instr->IsControl()); + HTest* test = HTest::cast(current); + instr->set_hydrogen_value(test->value()); + HBasicBlock* first = test->FirstSuccessor(); + HBasicBlock* second = test->SecondSuccessor(); + ASSERT(first != NULL && second != NULL); + instr->SetBranchTargets(first->block_id(), second->block_id()); + } else { + instr->set_hydrogen_value(current); + } + chunk_->AddInstruction(instr, current_block_); } current_instruction_ = old_current; @@ -999,8 +1007,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { outer); int argument_index = 0; for (int i = 0; i < value_count; ++i) { - if (hydrogen_env->is_special_index(i)) continue; - HValue* value = hydrogen_env->values()->at(i); LOperand* op = NULL; if (value->IsArgumentsObject()) { @@ -1018,23 +1024,106 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new LGoto(instr->FirstSuccessor()->block_id()); + LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(), + instr->include_stack_check()); + if (instr->include_stack_check()) result = AssignPointerMap(result); + return result; } -LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { +LInstruction* LChunkBuilder::DoTest(HTest* instr) { HValue* v = instr->value(); if (v->EmitAtUses()) { - HBasicBlock* successor = HConstant::cast(v)->ToBoolean() - ? instr->FirstSuccessor() - : instr->SecondSuccessor(); - return new LGoto(successor->block_id()); + if (v->IsClassOfTest()) { + HClassOfTest* compare = HClassOfTest::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LClassOfTestAndBranch(UseTempRegister(compare->value()), + TempRegister()); + } else if (v->IsCompare()) { + HCompare* compare = HCompare::cast(v); + Token::Value op = compare->token(); + HValue* left = compare->left(); + HValue* right = compare->right(); + Representation r = compare->GetInputRepresentation(); + if (r.IsInteger32()) { + ASSERT(left->representation().IsInteger32()); + ASSERT(right->representation().IsInteger32()); + return new LCmpIDAndBranch(UseRegisterAtStart(left), + UseRegisterAtStart(right)); + } else if (r.IsDouble()) { + ASSERT(left->representation().IsDouble()); + ASSERT(right->representation().IsDouble()); + return new LCmpIDAndBranch(UseRegisterAtStart(left), + UseRegisterAtStart(right)); + } else { + ASSERT(left->representation().IsTagged()); + ASSERT(right->representation().IsTagged()); + bool reversed = op == Token::GT || op == Token::LTE; + LOperand* left_operand = UseFixed(left, reversed ? 
r0 : r1); + LOperand* right_operand = UseFixed(right, reversed ? r1 : r0); + LInstruction* result = new LCmpTAndBranch(left_operand, + right_operand); + return MarkAsCall(result, instr); + } + } else if (v->IsIsSmi()) { + HIsSmi* compare = HIsSmi::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LIsSmiAndBranch(Use(compare->value())); + } else if (v->IsHasInstanceType()) { + HHasInstanceType* compare = HHasInstanceType::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + return new LHasInstanceTypeAndBranch( + UseRegisterAtStart(compare->value())); + } else if (v->IsHasCachedArrayIndex()) { + HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LHasCachedArrayIndexAndBranch( + UseRegisterAtStart(compare->value())); + } else if (v->IsIsNull()) { + HIsNull* compare = HIsNull::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + return new LIsNullAndBranch(UseRegisterAtStart(compare->value())); + } else if (v->IsIsObject()) { + HIsObject* compare = HIsObject::cast(v); + ASSERT(compare->value()->representation().IsTagged()); + + LOperand* temp = TempRegister(); + return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp); + } else if (v->IsCompareJSObjectEq()) { + HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v); + return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()), + UseRegisterAtStart(compare->right())); + } else if (v->IsInstanceOf()) { + HInstanceOf* instance_of = HInstanceOf::cast(v); + LInstruction* result = + new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0), + UseFixed(instance_of->right(), r1)); + return MarkAsCall(result, instr); + } else if (v->IsTypeofIs()) { + HTypeofIs* typeof_is = HTypeofIs::cast(v); + return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value())); + } else if (v->IsIsConstructCall()) { + return new LIsConstructCallAndBranch(TempRegister()); + } else { + if (v->IsConstant()) { + if (HConstant::cast(v)->handle()->IsTrue()) { + return new LGoto(instr->FirstSuccessor()->block_id()); + } else if (HConstant::cast(v)->handle()->IsFalse()) { + return new LGoto(instr->SecondSuccessor()->block_id()); + } + } + Abort("Undefined compare before branch"); + return NULL; + } } return new LBranch(UseRegisterAtStart(v)); } - LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { ASSERT(instr->value()->representation().IsTagged()); LOperand* value = UseRegisterAtStart(instr->value()); @@ -1089,13 +1178,8 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { } -LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { - return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction); -} - - LInstruction* LChunkBuilder::DoContext(HContext* instr) { - return instr->HasNoUses() ? 
NULL : DefineAsRegister(new LContext); + return DefineAsRegister(new LContext); } @@ -1124,39 +1208,35 @@ LInstruction* LChunkBuilder::DoCallConstantFunction( } -LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { - LOperand* function = UseFixed(instr->function(), r1); - argument_count_ -= instr->argument_count(); - LInvokeFunction* result = new LInvokeFunction(function); - return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY); -} - - LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { BuiltinFunctionId op = instr->op(); - if (op == kMathLog || op == kMathSin || op == kMathCos) { - LOperand* input = UseFixedDouble(instr->value(), d2); - LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL); - return MarkAsCall(DefineFixedDouble(result, d2), instr); - } else { - LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; - LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); - switch (op) { - case kMathAbs: - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - case kMathFloor: - return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); - case kMathSqrt: - return DefineAsRegister(result); - case kMathRound: - return AssignEnvironment(DefineAsRegister(result)); - case kMathPowHalf: - return DefineAsRegister(result); - default: - UNREACHABLE(); - return NULL; - } + LOperand* input = UseRegisterAtStart(instr->value()); + LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; + LUnaryMathOperation* result = new LUnaryMathOperation(input, temp); + switch (op) { + case kMathAbs: + return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); + case kMathFloor: + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); + case kMathSqrt: + return DefineSameAsFirst(result); + case kMathRound: + return AssignEnvironment(DefineAsRegister(result)); + case kMathPowHalf: + Abort("MathPowHalf LUnaryMathOperation not implemented"); + return NULL; + case kMathLog: + Abort("MathLog LUnaryMathOperation not implemented"); + return NULL; + case kMathCos: + Abort("MathCos LUnaryMathOperation not implemented"); + return NULL; + case kMathSin: + Abort("MathSin LUnaryMathOperation not implemented"); + return NULL; + default: + UNREACHABLE(); + return NULL; } } @@ -1230,7 +1310,7 @@ LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) { LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) { ASSERT(instr->value()->representation().IsInteger32()); ASSERT(instr->representation().IsInteger32()); - return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value()))); + return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value()))); } @@ -1249,7 +1329,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { return DoArithmeticD(Token::DIV, instr); } else if (instr->representation().IsInteger32()) { // TODO(1042) The fixed register allocation - // is needed because we call TypeRecordingBinaryOpStub from + // is needed because we call GenericBinaryOpStub from // the generated code, which requires registers r0 // and r1 to be used. We should remove that // when we provide a native implementation. 
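The TODO(1042) comments in the surrounding DoDiv/DoMod hunks explain why the old port pins operands to fixed registers: when the generated code falls back to GenericBinaryOpStub, the stub expects its inputs in r0 and r1 and leaves the result in r0, so the chunk builder must request fixed registers (UseFixed/DefineFixed) instead of letting the allocator choose (UseRegister/DefineAsRegister), and it must attach a deopt environment because the operation can bail out. The standalone sketch below only models that policy choice; it is not part of the commit, and names such as Policy, Operand and SelectModViaStub are invented for illustration rather than V8 API.

#include <cstdio>

enum class Policy { kAnyRegister, kFixedR0, kFixedR1 };

struct Operand {
  const char* value;
  Policy policy;
};

struct Instr {
  const char* name;
  Operand left;
  Operand right;
  Policy result;
  bool needs_environment;  // true when the operation may deoptimize
};

// Stub path: inputs pinned to r0/r1 and the result to r0, matching the
// calling convention the stub expects; an environment is attached because
// the operation can bail out (for example on division by zero).
Instr SelectModViaStub(const char* a, const char* b) {
  return {"mod (stub call)",
          {a, Policy::kFixedR0},
          {b, Policy::kFixedR1},
          Policy::kFixedR0,
          true};
}

// Hypothetical native path: the register allocator may pick any registers.
Instr SelectModNative(const char* a, const char* b) {
  return {"mod (native)",
          {a, Policy::kAnyRegister},
          {b, Policy::kAnyRegister},
          Policy::kAnyRegister,
          true};
}

static const char* Name(Policy p) {
  switch (p) {
    case Policy::kFixedR0: return "r0 (fixed)";
    case Policy::kFixedR1: return "r1 (fixed)";
    default: return "any register";
  }
}

int main() {
  Instr cases[] = {SelectModViaStub("x", "y"), SelectModNative("x", "y")};
  for (const Instr& i : cases) {
    std::printf("%s: left %s -> %s, right %s -> %s, result -> %s%s\n",
                i.name, i.left.value, Name(i.left.policy), i.right.value,
                Name(i.right.policy), Name(i.result),
                i.needs_environment ? " (with deopt environment)" : "");
  }
  return 0;
}

In the real chunks this same trade-off appears directly below: the restored 3.1.8 DoMod uses UseFixed(instr->left(), r0) and UseFixed(instr->right(), r1) with DefineFixed(..., r0), while the removed 3.4-era code used UseRegister operands and only assigned an environment when a bailout was actually possible.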
@@ -1265,30 +1345,18 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { LInstruction* LChunkBuilder::DoMod(HMod* instr) { if (instr->representation().IsInteger32()) { + // TODO(1042) The fixed register allocation + // is needed because we call GenericBinaryOpStub from + // the generated code, which requires registers r0 + // and r1 to be used. We should remove that + // when we provide a native implementation. ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); - - LModI* mod; - if (instr->HasPowerOf2Divisor()) { - ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); - LOperand* value = UseRegisterAtStart(instr->left()); - mod = new LModI(value, UseOrConstant(instr->right())); - } else { - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - mod = new LModI(dividend, - divisor, - TempRegister(), - FixedTemp(d10), - FixedTemp(d11)); - } - - if (instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanBeDivByZero)) { - return AssignEnvironment(DefineAsRegister(mod)); - } else { - return DefineAsRegister(mod); - } + LOperand* value = UseFixed(instr->left(), r0); + LOperand* divisor = UseFixed(instr->right(), r1); + LInstruction* result = DefineFixed(new LModI(value, divisor), r0); + result = AssignEnvironment(AssignPointerMap(result)); + return result; } else if (instr->representation().IsTagged()) { return DoArithmeticT(Token::MOD, instr); } else { @@ -1308,22 +1376,16 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); - LOperand* left; + LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); LOperand* right = UseOrConstant(instr->MostConstantOperand()); LOperand* temp = NULL; - if (instr->CheckFlag(HValue::kBailoutOnMinusZero) && - (instr->CheckFlag(HValue::kCanOverflow) || - !right->IsConstantOperand())) { - left = UseRegister(instr->LeastConstantOperand()); + if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { temp = TempRegister(); - } else { - left = UseRegisterAtStart(instr->LeastConstantOperand()); } - return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp))); - + LMulI* mul = new LMulI(left, right, temp); + return AssignEnvironment(DefineSameAsFirst(mul)); } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::MUL, instr); - } else { return DoArithmeticT(Token::MUL, instr); } @@ -1337,7 +1399,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new LSubI(left, right); - LInstruction* result = DefineAsRegister(sub); + LInstruction* result = DefineSameAsFirst(sub); if (instr->CheckFlag(HValue::kCanOverflow)) { result = AssignEnvironment(result); } @@ -1357,7 +1419,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand()); LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand()); LAddI* add = new LAddI(left, right); - LInstruction* result = DefineAsRegister(add); + LInstruction* result = DefineSameAsFirst(add); if (instr->CheckFlag(HValue::kCanOverflow)) { result = AssignEnvironment(result); } @@ -1388,109 +1450,96 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { } -LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { 
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) { Token::Value op = instr->token(); Representation r = instr->GetInputRepresentation(); - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); - bool reversed = (op == Token::GT || op == Token::LTE); - LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1); - LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0); - LCmpT* result = new LCmpT(left, right); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoCompareIDAndBranch( - HCompareIDAndBranch* instr) { - Representation r = instr->GetInputRepresentation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); - return new LCmpIDAndBranch(left, right); - } else { - ASSERT(r.IsDouble()); + return DefineAsRegister(new LCmpID(left, right)); + } else if (r.IsDouble()) { ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); - return new LCmpIDAndBranch(left, right); + return DefineAsRegister(new LCmpID(left, right)); + } else { + ASSERT(instr->left()->representation().IsTagged()); + ASSERT(instr->right()->representation().IsTagged()); + bool reversed = (op == Token::GT || op == Token::LTE); + LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1); + LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0); + LCmpT* result = new LCmpT(left, right); + return MarkAsCall(DefineFixed(result, r0), instr); } } -LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( - HCompareObjectEqAndBranch* instr) { +LInstruction* LChunkBuilder::DoCompareJSObjectEq( + HCompareJSObjectEq* instr) { LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); - return new LCmpObjectEqAndBranch(left, right); -} - - -LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( - HCompareConstantEqAndBranch* instr) { - return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value())); + LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right); + return DefineAsRegister(result); } -LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LIsNullAndBranch(UseRegisterAtStart(instr->value())); -} - + LOperand* value = UseRegisterAtStart(instr->value()); -LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) { - ASSERT(instr->value()->representation().IsTagged()); - LOperand* temp = TempRegister(); - return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp); + return DefineAsRegister(new LIsNull(value)); } -LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LIsSmiAndBranch(Use(instr->value())); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LIsObject(value)); } -LInstruction* LChunkBuilder::DoIsUndetectableAndBranch( - HIsUndetectableAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) { 
ASSERT(instr->value()->representation().IsTagged()); - return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()), - TempRegister()); + LOperand* value = UseAtStart(instr->value()); + + return DefineAsRegister(new LIsSmi(value)); } -LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch( - HHasInstanceTypeAndBranch* instr) { +LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value())); + LOperand* value = UseRegisterAtStart(instr->value()); + + return DefineAsRegister(new LHasInstanceType(value)); } LInstruction* LChunkBuilder::DoGetCachedArrayIndex( HGetCachedArrayIndex* instr) { ASSERT(instr->value()->representation().IsTagged()); - LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* value = UseRegister(instr->value()); return DefineAsRegister(new LGetCachedArrayIndex(value)); } -LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( - HHasCachedArrayIndexAndBranch* instr) { +LInstruction* LChunkBuilder::DoHasCachedArrayIndex( + HHasCachedArrayIndex* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LHasCachedArrayIndexAndBranch( - UseRegisterAtStart(instr->value())); + LOperand* value = UseRegister(instr->value()); + + return DefineAsRegister(new LHasCachedArrayIndex(value)); } -LInstruction* LChunkBuilder::DoClassOfTestAndBranch( - HClassOfTestAndBranch* instr) { +LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LClassOfTestAndBranch(UseTempRegister(instr->value()), - TempRegister()); + LOperand* value = UseTempRegister(instr->value()); + return DefineSameAsFirst(new LClassOfTest(value)); } @@ -1500,10 +1549,9 @@ LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) { } -LInstruction* LChunkBuilder::DoExternalArrayLength( - HExternalArrayLength* instr) { +LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) { LOperand* array = UseRegisterAtStart(instr->value()); - return DefineAsRegister(new LExternalArrayLength(array)); + return DefineAsRegister(new LPixelArrayLength(array)); } @@ -1513,16 +1561,10 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) { } -LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) { - LOperand* object = UseRegisterAtStart(instr->value()); - return DefineAsRegister(new LElementsKind(object)); -} - - LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { LOperand* object = UseRegister(instr->value()); LValueOf* result = new LValueOf(object, TempRegister()); - return AssignEnvironment(DefineAsRegister(result)); + return AssignEnvironment(DefineSameAsFirst(result)); } @@ -1545,19 +1587,6 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) { } -LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { - return NULL; -} - - -LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { - // All HForceRepresentation instructions should be eliminated in the - // representation change phase of Hydrogen. 
- UNREACHABLE(); - return NULL; -} - - LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); @@ -1571,15 +1600,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { LOperand* value = UseRegister(instr->value()); bool needs_check = !instr->value()->type().IsSmi(); LInstruction* res = NULL; - if (!needs_check) { - res = DefineSameAsFirst(new LSmiUntag(value, needs_check)); + if (needs_check) { + res = DefineSameAsFirst(new LTaggedToI(value, FixedTemp(d1))); } else { - LOperand* temp1 = TempRegister(); - LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() - : NULL; - LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11) - : NULL; - res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3)); + res = DefineSameAsFirst(new LSmiUntag(value, needs_check)); + } + if (needs_check) { res = AssignEnvironment(res); } return res; @@ -1599,10 +1625,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else { ASSERT(to.IsInteger32()); LOperand* value = UseRegister(instr->value()); - LDoubleToI* res = - new LDoubleToI(value, - TempRegister(), - instr->CanTruncateToInt32() ? TempRegister() : NULL); + LDoubleToI* res = new LDoubleToI(value, TempRegister()); return AssignEnvironment(DefineAsRegister(res)); } } else if (from.IsInteger32()) { @@ -1628,7 +1651,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new LCheckNonSmi(value)); + return AssignEnvironment(new LCheckSmi(value, eq)); } @@ -1649,7 +1672,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new LCheckSmi(value)); + return AssignEnvironment(new LCheckSmi(value, ne)); } @@ -1666,49 +1689,6 @@ LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) { } -LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11))); - } else if (input_rep.IsInteger32()) { - return DefineAsRegister(new LClampIToUint8(reg)); - } else { - ASSERT(input_rep.IsTagged()); - // Register allocator doesn't (yet) support allocation of double - // temps. Reserve d1 explicitly. - LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11)); - return AssignEnvironment(DefineAsRegister(result)); - } -} - - -LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) { - HValue* value = instr->value(); - Representation input_rep = value->representation(); - LOperand* reg = UseRegister(value); - if (input_rep.IsDouble()) { - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LDoubleToI* res = new LDoubleToI(reg, temp1, temp2); - return AssignEnvironment(DefineAsRegister(res)); - } else if (input_rep.IsInteger32()) { - // Canonicalization should already have removed the hydrogen instruction in - // this case, since it is a noop. 
- UNREACHABLE(); - return NULL; - } else { - ASSERT(input_rep.IsTagged()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LOperand* temp3 = FixedTemp(d11); - LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3); - return AssignEnvironment(DefineSameAsFirst(res)); - } -} - - LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { return new LReturn(UseFixed(instr->value(), r0)); } @@ -1729,42 +1709,26 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { } -LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { - LLoadGlobalCell* result = new LLoadGlobalCell; +LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) { + LLoadGlobal* result = new LLoadGlobal(); return instr->check_hole_value() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } -LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { - LOperand* global_object = UseFixed(instr->global_object(), r0); - LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - -LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { +LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) { if (instr->check_hole_value()) { LOperand* temp = TempRegister(); LOperand* value = UseRegister(instr->value()); - return AssignEnvironment(new LStoreGlobalCell(value, temp)); + return AssignEnvironment(new LStoreGlobal(value, temp)); } else { LOperand* value = UseRegisterAtStart(instr->value()); - return new LStoreGlobalCell(value, NULL); + return new LStoreGlobal(value, NULL); } } -LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) { - LOperand* global_object = UseFixed(instr->global_object(), r1); - LOperand* value = UseFixed(instr->value(), r0); - LStoreGlobalGeneric* result = - new LStoreGlobalGeneric(global_object, value); - return MarkAsCall(result, instr); -} - - LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LOperand* context = UseRegisterAtStart(instr->value()); return DefineAsRegister(new LLoadContextSlot(context)); @@ -1791,21 +1755,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { } -LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic( - HLoadNamedFieldPolymorphic* instr) { - ASSERT(instr->representation().IsTagged()); - if (instr->need_generic()) { - LOperand* obj = UseFixed(instr->object(), r0); - LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj); - return MarkAsCall(DefineFixed(result, r0), instr); - } else { - LOperand* obj = UseRegisterAtStart(instr->object()); - LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj); - return AssignEnvironment(DefineAsRegister(result)); - } -} - - LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LOperand* object = UseFixed(instr->object(), r0); LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0); @@ -1826,10 +1775,10 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) { } -LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( - HLoadExternalArrayPointer* instr) { +LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer( + HLoadPixelArrayExternalPointer* instr) { LOperand* input = UseRegisterAtStart(instr->value()); - return DefineAsRegister(new LLoadExternalArrayPointer(input)); + return DefineAsRegister(new LLoadPixelArrayExternalPointer(input)); } @@ -1840,31 +1789,20 @@ LInstruction* 
LChunkBuilder::DoLoadKeyedFastElement( LOperand* obj = UseRegisterAtStart(instr->object()); LOperand* key = UseRegisterAtStart(instr->key()); LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key); - return AssignEnvironment(DefineAsRegister(result)); + return AssignEnvironment(DefineSameAsFirst(result)); } -LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( - HLoadKeyedSpecializedArrayElement* instr) { - JSObject::ElementsKind elements_kind = instr->elements_kind(); - Representation representation(instr->representation()); - ASSERT( - (representation.IsInteger32() && - (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && - ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS)))); +LInstruction* LChunkBuilder::DoLoadPixelArrayElement( + HLoadPixelArrayElement* instr) { + ASSERT(instr->representation().IsInteger32()); ASSERT(instr->key()->representation().IsInteger32()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LLoadKeyedSpecializedArrayElement* result = - new LLoadKeyedSpecializedArrayElement(external_pointer, key); - LInstruction* load_instr = DefineAsRegister(result); - // An unsigned int array load might overflow and cause a deopt, make sure it - // has an environment. - return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ? - AssignEnvironment(load_instr) : load_instr; + LOperand* external_pointer = + UseRegisterAtStart(instr->external_pointer()); + LOperand* key = UseRegisterAtStart(instr->key()); + LLoadPixelArrayElement* result = + new LLoadPixelArrayElement(external_pointer, key); + return DefineAsRegister(result); } @@ -1897,32 +1835,10 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } -LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( - HStoreKeyedSpecializedArrayElement* instr) { - Representation representation(instr->value()->representation()); - JSObject::ElementsKind elements_kind = instr->elements_kind(); - ASSERT( - (representation.IsInteger32() && - (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) || - (representation.IsDouble() && - ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->external_pointer()->representation().IsExternal()); - ASSERT(instr->key()->representation().IsInteger32()); - - LOperand* external_pointer = UseRegister(instr->external_pointer()); - bool val_is_temp_register = - elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS || - elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register - ? 
UseTempRegister(instr->value()) - : UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstant(instr->key()); - - return new LStoreKeyedSpecializedArrayElement(external_pointer, - key, - val); +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + Abort("DoStorePixelArrayElement not implemented"); + return NULL; } @@ -1963,13 +1879,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr); -} - - LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { LOperand* string = UseRegister(instr->string()); LOperand* index = UseRegisterOrConstant(instr->index()); @@ -1978,13 +1887,6 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { } -LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { - LOperand* char_code = UseRegister(instr->value()); - LStringCharFromCode* result = new LStringCharFromCode(char_code); - return AssignPointerMap(DefineAsRegister(result)); -} - - LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LOperand* string = UseRegisterAtStart(instr->value()); return DefineAsRegister(new LStringLength(string)); @@ -2066,27 +1968,19 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { } -LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) { - LOperand* object = UseFixed(instr->value(), r0); - LToFastProperties* result = new LToFastProperties(object); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { LTypeof* result = new LTypeof(UseFixed(instr->value(), r0)); return MarkAsCall(DefineFixed(result, r0), instr); } -LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { - return new LTypeofIsAndBranch(UseTempRegister(instr->value())); +LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) { + return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value()))); } -LInstruction* LChunkBuilder::DoIsConstructCallAndBranch( - HIsConstructCallAndBranch* instr) { - return new LIsConstructCallAndBranch(TempRegister()); +LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) { + return DefineAsRegister(new LIsConstructCall()); } @@ -2106,6 +2000,8 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { } } + ASSERT(env->length() == instr->environment_length()); + // If there is an instruction pending deoptimization environment create a // lazy bailout instruction to capture the environment. 
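
// ---------------------------------------------------------------------------
// A minimal standalone sketch (not part of the diff) of the bookkeeping the
// comment above describes: the builder remembers the AST id of an
// instruction that still needs a deoptimization environment, and the next
// simulate with that id emits the lazy bailout and clears the state.  The
// helper names below are illustrative stand-ins, reduced to plain ints.
#include <cstdio>

static const int kNoNumber = -1;
static int pending_deoptimization_ast_id = kNoNumber;

static void RecordInstructionWithPendingEnvironment(int ast_id) {
  pending_deoptimization_ast_id = ast_id;
}

static bool Simulate(int ast_id) {
  if (pending_deoptimization_ast_id == ast_id) {
    pending_deoptimization_ast_id = kNoNumber;   // environment captured
    return true;                                 // would emit an LLazyBailout
  }
  return false;                                  // nothing pending for this id
}

int main() {
  RecordInstructionWithPendingEnvironment(7);
  std::printf("%d %d\n", Simulate(3), Simulate(7));  // 0 1
  return 0;
}
// ---------------------------------------------------------------------------
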
if (pending_deoptimization_ast_id_ == instr->ast_id()) { @@ -2122,12 +2018,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { - if (instr->is_function_entry()) { - return MarkAsCall(new LStackCheck, instr); - } else { - ASSERT(instr->is_backwards_branch()); - return AssignEnvironment(AssignPointerMap(new LStackCheck)); - } + return MarkAsCall(new LStackCheck, instr); } @@ -2136,8 +2027,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { HConstant* undefined = graph()->GetConstantUndefined(); HEnvironment* inner = outer->CopyForInlining(instr->closure(), instr->function(), - undefined, - instr->call_kind()); + false, + undefined); current_block_->UpdateEnvironment(inner); chunk_->AddInlinedClosure(instr->closure()); return NULL; @@ -2151,12 +2042,4 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { } -LInstruction* LChunkBuilder::DoIn(HIn* instr) { - LOperand* key = UseRegisterAtStart(instr->key()); - LOperand* object = UseRegisterAtStart(instr->object()); - LIn* result = new LIn(key, object); - return MarkAsCall(DefineFixed(result, r0), instr); -} - - } } // namespace v8::internal diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index ebeba86943..77d6b71a93 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -32,7 +32,6 @@ #include "lithium-allocator.h" #include "lithium.h" #include "safepoint-table.h" -#include "utils.h" namespace v8 { namespace internal { @@ -70,19 +69,18 @@ class LCodeGen; V(CallStub) \ V(CheckFunction) \ V(CheckInstanceType) \ - V(CheckNonSmi) \ V(CheckMap) \ V(CheckPrototypeMaps) \ V(CheckSmi) \ - V(ClampDToUint8) \ - V(ClampIToUint8) \ - V(ClampTToUint8) \ + V(ClassOfTest) \ V(ClassOfTestAndBranch) \ - V(CmpConstantEqAndBranch) \ + V(CmpID) \ V(CmpIDAndBranch) \ - V(CmpObjectEqAndBranch) \ + V(CmpJSObjectEq) \ + V(CmpJSObjectEqAndBranch) \ V(CmpMapAndBranch) \ V(CmpT) \ + V(CmpTAndBranch) \ V(ConstantD) \ V(ConstantI) \ V(ConstantT) \ @@ -91,42 +89,40 @@ class LCodeGen; V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ - V(ElementsKind) \ - V(ExternalArrayLength) \ V(FixedArrayLength) \ V(FunctionLiteral) \ + V(Gap) \ V(GetCachedArrayIndex) \ V(GlobalObject) \ V(GlobalReceiver) \ V(Goto) \ + V(HasCachedArrayIndex) \ V(HasCachedArrayIndexAndBranch) \ + V(HasInstanceType) \ V(HasInstanceTypeAndBranch) \ - V(In) \ V(InstanceOf) \ + V(InstanceOfAndBranch) \ V(InstanceOfKnownGlobal) \ - V(InstructionGap) \ V(Integer32ToDouble) \ - V(InvokeFunction) \ - V(IsConstructCallAndBranch) \ + V(IsNull) \ V(IsNullAndBranch) \ + V(IsObject) \ V(IsObjectAndBranch) \ + V(IsSmi) \ V(IsSmiAndBranch) \ - V(IsUndetectableAndBranch) \ V(JSArrayLength) \ V(Label) \ V(LazyBailout) \ V(LoadContextSlot) \ V(LoadElements) \ - V(LoadExternalArrayPointer) \ V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ - V(LoadGlobalGeneric) \ + V(LoadGlobal) \ V(LoadKeyedFastElement) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ - V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ + V(LoadPixelArrayElement) \ + V(LoadPixelArrayExternalPointer) \ V(ModI) \ V(MulI) \ V(NumberTagD) \ @@ -136,6 +132,7 @@ class LCodeGen; V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ + V(PixelArrayLength) \ V(Power) \ V(PushArgument) \ V(RegExpLiteral) \ @@ -145,39 +142,40 @@ class LCodeGen; V(SmiUntag) \ V(StackCheck) \ V(StoreContextSlot) \ - V(StoreGlobalCell) \ - V(StoreGlobalGeneric) \ + V(StoreGlobal) \ 
V(StoreKeyedFastElement) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ - V(StringAdd) \ V(StringCharCodeAt) \ - V(StringCharFromCode) \ V(StringLength) \ V(SubI) \ V(TaggedToI) \ - V(ThisFunction) \ V(Throw) \ - V(ToFastProperties) \ V(Typeof) \ + V(TypeofIs) \ V(TypeofIsAndBranch) \ + V(IsConstructCall) \ + V(IsConstructCallAndBranch) \ V(UnaryMathOperation) \ V(UnknownOSRValue) \ V(ValueOf) -#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ - virtual Opcode opcode() const { return LInstruction::k##type; } \ - virtual void CompileToNative(LCodeGen* generator); \ - virtual const char* Mnemonic() const { return mnemonic; } \ - static L##type* cast(LInstruction* instr) { \ - ASSERT(instr->Is##type()); \ - return reinterpret_cast<L##type*>(instr); \ +#define DECLARE_INSTRUCTION(type) \ + virtual bool Is##type() const { return true; } \ + static L##type* cast(LInstruction* instr) { \ + ASSERT(instr->Is##type()); \ + return reinterpret_cast<L##type*>(instr); \ } +#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ + virtual void CompileToNative(LCodeGen* generator); \ + virtual const char* Mnemonic() const { return mnemonic; } \ + DECLARE_INSTRUCTION(type) + + #define DECLARE_HYDROGEN_ACCESSOR(type) \ H##type* hydrogen() const { \ return H##type::cast(hydrogen_value()); \ @@ -199,27 +197,13 @@ class LInstruction: public ZoneObject { virtual void PrintDataTo(StringStream* stream) = 0; virtual void PrintOutputOperandTo(StringStream* stream) = 0; - enum Opcode { - // Declare a unique enum value for each instruction. -#define DECLARE_OPCODE(type) k##type, - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) - kNumberOfInstructions -#undef DECLARE_OPCODE - }; - - virtual Opcode opcode() const = 0; - - // Declare non-virtual type testers for all leaf IR classes. -#define DECLARE_PREDICATE(type) \ - bool Is##type() const { return opcode() == k##type; } - LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE) -#undef DECLARE_PREDICATE - - // Declare virtual predicates for instructions that don't have - // an opcode. - virtual bool IsGap() const { return false; } + // Declare virtual type testers. +#define DECLARE_DO(type) virtual bool Is##type() const { return false; } + LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO) +#undef DECLARE_DO virtual bool IsControl() const { return false; } + virtual void SetBranchTargets(int true_block_id, int false_block_id) { } void set_environment(LEnvironment* env) { environment_ = env; } LEnvironment* environment() const { return environment_; } @@ -274,6 +258,37 @@ class LInstruction: public ZoneObject { }; +template<typename ElementType, int NumElements> +class OperandContainer { + public: + OperandContainer() { + for (int i = 0; i < NumElements; i++) elems_[i] = NULL; + } + int length() { return NumElements; } + ElementType& operator[](int i) { + ASSERT(i < length()); + return elems_[i]; + } + void PrintOperandsTo(StringStream* stream); + + private: + ElementType elems_[NumElements]; +}; + + +template<typename ElementType> +class OperandContainer<ElementType, 0> { + public: + int length() { return 0; } + void PrintOperandsTo(StringStream* stream) { } + ElementType& operator[](int i) { + UNREACHABLE(); + static ElementType t = 0; + return t; + } +}; + + // R = number of result operands (0 or 1). // I = number of input operands. // T = number of temporary operands. 
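
The OperandContainer added above is a small fixed-size array wrapper whose zero-element specialization avoids a zero-length array and turns any indexing into a hard failure. A minimal standalone sketch of the same pattern, with the element type reduced to int so it compiles on its own (Container is an illustrative name, not V8 code):

#include <cassert>
#include <cstdio>

template <typename ElementType, int NumElements>
class Container {
 public:
  Container() {
    for (int i = 0; i < NumElements; i++) elems_[i] = ElementType();
  }
  int length() const { return NumElements; }
  ElementType& operator[](int i) {
    assert(i < length());          // mirrors the ASSERT in OperandContainer
    return elems_[i];
  }
 private:
  ElementType elems_[NumElements];
};

// Zero-length specialization: no storage, and indexing is always an error.
template <typename ElementType>
class Container<ElementType, 0> {
 public:
  int length() const { return 0; }
  ElementType& operator[](int) {
    assert(false);                 // stands in for UNREACHABLE()
    static ElementType dummy = ElementType();
    return dummy;
  }
};

int main() {
  Container<int, 2> two;
  two[0] = 7;
  two[1] = 11;
  Container<int, 0> none;
  std::printf("%d %d %d\n", two[0], two[1], none.length());  // 7 11 0
  return 0;
}
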
@@ -296,9 +311,9 @@ class LTemplateInstruction: public LInstruction { virtual void PrintOutputOperandTo(StringStream* stream); protected: - EmbeddedContainer<LOperand*, R> results_; - EmbeddedContainer<LOperand*, I> inputs_; - EmbeddedContainer<LOperand*, T> temps_; + OperandContainer<LOperand*, R> results_; + OperandContainer<LOperand*, I> inputs_; + OperandContainer<LOperand*, T> temps_; }; @@ -312,13 +327,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> { parallel_moves_[AFTER] = NULL; } - // Can't use the DECLARE-macro here because of sub-classes. - virtual bool IsGap() const { return true; } - virtual void PrintDataTo(StringStream* stream); - static LGap* cast(LInstruction* instr) { - ASSERT(instr->IsGap()); - return reinterpret_cast<LGap*>(instr); - } + DECLARE_CONCRETE_INSTRUCTION(Gap, "gap") + virtual void PrintDataTo(StringStream* stream) const; bool IsRedundant() const; @@ -348,26 +358,21 @@ class LGap: public LTemplateInstruction<0, 0, 0> { }; -class LInstructionGap: public LGap { - public: - explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } - - DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") -}; - - class LGoto: public LTemplateInstruction<0, 0, 0> { public: - explicit LGoto(int block_id) : block_id_(block_id) { } + LGoto(int block_id, bool include_stack_check = false) + : block_id_(block_id), include_stack_check_(include_stack_check) { } DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream); virtual bool IsControl() const { return true; } int block_id() const { return block_id_; } + bool include_stack_check() const { return include_stack_check_; } private: int block_id_; + bool include_stack_check_; }; @@ -441,17 +446,19 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { template<int I, int T> class LControlInstruction: public LTemplateInstruction<0, I, T> { public: + DECLARE_INSTRUCTION(ControlInstruction) virtual bool IsControl() const { return true; } - int SuccessorCount() { return hydrogen()->SuccessorCount(); } - HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); } - int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); } - int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); } + int true_block_id() const { return true_block_id_; } + int false_block_id() const { return false_block_id_; } + void SetBranchTargets(int true_block_id, int false_block_id) { + true_block_id_ = true_block_id; + false_block_id_ = false_block_id; + } private: - HControlInstruction* hydrogen() { - return HControlInstruction::cast(this->hydrogen_value()); - } + int true_block_id_; + int false_block_id_; }; @@ -512,29 +519,11 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> { }; -class LModI: public LTemplateInstruction<1, 2, 3> { +class LModI: public LTemplateInstruction<1, 2, 0> { public: - // Used when the right hand is a constant power of 2. - LModI(LOperand* left, - LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = NULL; - temps_[1] = NULL; - temps_[2] = NULL; - } - - // Used for the standard case. 
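
// ---------------------------------------------------------------------------
// A standalone sketch (not part of the diff) of the fast path the removed
// constructor above exists for: when the divisor is a constant power of two,
// DoModI later in this diff reduces x % d to a bitwise AND with (d - 1),
// negating before and after for negative dividends so the result keeps the
// dividend's sign.  ModPowerOfTwo is an illustrative name, not V8 code.
#include <cassert>
#include <cstdio>

static int ModPowerOfTwo(int dividend, int divisor) {
  assert(divisor > 0 && (divisor & (divisor - 1)) == 0);  // power of two only
  if (dividend < 0) {
    return -(-dividend & (divisor - 1));   // rsb, and_, rsb in the ARM code
  }
  return dividend & (divisor - 1);         // plain and_ for nonnegative input
}

int main() {
  std::printf("%d %d %d\n",
              ModPowerOfTwo(13, 8),    //  5
              ModPowerOfTwo(-13, 8),   // -5
              ModPowerOfTwo(16, 8));   //  0
  return 0;
}
// ---------------------------------------------------------------------------
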
- LModI(LOperand* left, - LOperand* right, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { + LModI(LOperand* left, LOperand* right) { inputs_[0] = left; inputs_[1] = right; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; } DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") @@ -567,6 +556,23 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { }; +class LCmpID: public LTemplateInstruction<1, 2, 0> { + public: + LCmpID(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id") + DECLARE_HYDROGEN_ACCESSOR(Compare) + + Token::Value op() const { return hydrogen()->token(); } + bool is_double() const { + return hydrogen()->GetInputRepresentation().IsDouble(); + } +}; + + class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { @@ -575,7 +581,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { } DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch) + DECLARE_HYDROGEN_ACCESSOR(Compare) Token::Value op() const { return hydrogen()->token(); } bool is_double() const { @@ -601,31 +607,41 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { }; -class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { +class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> { public: - LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { + LCmpJSObjectEq(LOperand* left, LOperand* right) { inputs_[0] = left; inputs_[1] = right; } - DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, - "cmp-object-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) + DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq") }; -class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { +class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> { public: - explicit LCmpConstantEqAndBranch(LOperand* left) { + LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) { inputs_[0] = left; + inputs_[1] = right; } - DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch, - "cmp-constant-eq-and-branch") - DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch) + DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch, + "cmp-jsobject-eq-and-branch") }; +class LIsNull: public LTemplateInstruction<1, 1, 0> { + public: + explicit LIsNull(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null") + DECLARE_HYDROGEN_ACCESSOR(IsNull) + + bool is_strict() const { return hydrogen()->is_strict(); } +}; + class LIsNullAndBranch: public LControlInstruction<1, 0> { public: explicit LIsNullAndBranch(LOperand* value) { @@ -633,7 +649,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> { } DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) + DECLARE_HYDROGEN_ACCESSOR(IsNull) bool is_strict() const { return hydrogen()->is_strict(); } @@ -641,7 +657,17 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> { }; -class LIsObjectAndBranch: public LControlInstruction<1, 1> { +class LIsObject: public LTemplateInstruction<1, 1, 1> { + public: + explicit LIsObject(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object") +}; + + +class LIsObjectAndBranch: public LControlInstruction<1, 2> { public: LIsObjectAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; @@ -649,12 +675,22 @@ class LIsObjectAndBranch: public 
LControlInstruction<1, 1> { } DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch) virtual void PrintDataTo(StringStream* stream); }; +class LIsSmi: public LTemplateInstruction<1, 1, 0> { + public: + explicit LIsSmi(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi") + DECLARE_HYDROGEN_ACCESSOR(IsSmi) +}; + + class LIsSmiAndBranch: public LControlInstruction<1, 0> { public: explicit LIsSmiAndBranch(LOperand* value) { @@ -662,24 +698,19 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> { } DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) virtual void PrintDataTo(StringStream* stream); }; -class LIsUndetectableAndBranch: public LControlInstruction<1, 1> { +class LHasInstanceType: public LTemplateInstruction<1, 1, 0> { public: - explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) { + explicit LHasInstanceType(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } - DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, - "is-undetectable-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) - - virtual void PrintDataTo(StringStream* stream); + DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type") + DECLARE_HYDROGEN_ACCESSOR(HasInstanceType) }; @@ -691,12 +722,23 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, "has-instance-type-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) + DECLARE_HYDROGEN_ACCESSOR(HasInstanceType) virtual void PrintDataTo(StringStream* stream); }; +class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { + public: + explicit LHasCachedArrayIndex(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index") + DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex) +}; + + class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { public: explicit LGetCachedArrayIndex(LOperand* value) { @@ -716,7 +758,18 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch, "has-cached-array-index-and-branch") - DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch) + virtual void PrintDataTo(StringStream* stream); +}; + + +class LClassOfTest: public LTemplateInstruction<1, 1, 0> { + public: + explicit LClassOfTest(LOperand* value) { + inputs_[0] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test") + DECLARE_HYDROGEN_ACCESSOR(ClassOfTest) virtual void PrintDataTo(StringStream* stream); }; @@ -731,7 +784,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> { DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") - DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) + DECLARE_HYDROGEN_ACCESSOR(ClassOfTest) virtual void PrintDataTo(StringStream* stream); }; @@ -745,7 +798,21 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> { } DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") - DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) + DECLARE_HYDROGEN_ACCESSOR(Compare) + + Token::Value op() const { return hydrogen()->token(); } +}; + + +class LCmpTAndBranch: public LControlInstruction<2, 0> { + public: + LCmpTAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch") + 
DECLARE_HYDROGEN_ACCESSOR(Compare) Token::Value op() const { return hydrogen()->token(); } }; @@ -762,6 +829,17 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> { }; +class LInstanceOfAndBranch: public LControlInstruction<2, 0> { + public: + LInstanceOfAndBranch(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch") +}; + + class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { public: LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) { @@ -874,7 +952,7 @@ class LBranch: public LControlInstruction<1, 0> { } DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") - DECLARE_HYDROGEN_ACCESSOR(Branch) + DECLARE_HYDROGEN_ACCESSOR(Value) virtual void PrintDataTo(StringStream* stream); }; @@ -913,14 +991,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> { }; -class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> { +class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> { public: - explicit LExternalArrayLength(LOperand* value) { + explicit LPixelArrayLength(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length") - DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength) + DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length") + DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength) }; @@ -935,17 +1013,6 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> { }; -class LElementsKind: public LTemplateInstruction<1, 1, 0> { - public: - explicit LElementsKind(LOperand* value) { - inputs_[0] = value; - } - - DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind") - DECLARE_HYDROGEN_ACCESSOR(ElementsKind) -}; - - class LValueOf: public LTemplateInstruction<1, 1, 1> { public: LValueOf(LOperand* value, LOperand* temp) { @@ -1012,7 +1079,6 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> { Token::Value op() const { return op_; } - virtual Opcode opcode() const { return LInstruction::kArithmeticD; } virtual void CompileToNative(LCodeGen* generator); virtual const char* Mnemonic() const; @@ -1029,7 +1095,6 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } - virtual Opcode opcode() const { return LInstruction::kArithmeticT; } virtual void CompileToNative(LCodeGen* generator); virtual const char* Mnemonic() const; @@ -1061,19 +1126,6 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> { }; -class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> { - public: - explicit LLoadNamedFieldPolymorphic(LOperand* object) { - inputs_[0] = object; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic") - DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic) - - LOperand* object() { return inputs_[0]; } -}; - - class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> { public: explicit LLoadNamedGeneric(LOperand* object) { @@ -1111,14 +1163,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> { }; -class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { +class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> { public: - explicit LLoadExternalArrayPointer(LOperand* object) { + explicit LLoadPixelArrayExternalPointer(LOperand* object) { inputs_[0] = object; } - DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer, - "load-external-array-pointer") + DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer, + "load-pixel-array-external-pointer") 
}; @@ -1137,23 +1189,19 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { }; -class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { +class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, - "load-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) + DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement, + "load-pixel-array-element") + DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement) LOperand* external_pointer() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } - JSObject::ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } }; @@ -1171,55 +1219,22 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> { }; -class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell") - DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell) -}; - - -class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> { +class LLoadGlobal: public LTemplateInstruction<1, 0, 0> { public: - explicit LLoadGlobalGeneric(LOperand* global_object) { - inputs_[0] = global_object; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") - DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) - - LOperand* global_object() { return inputs_[0]; } - Handle<Object> name() const { return hydrogen()->name(); } - bool for_typeof() const { return hydrogen()->for_typeof(); } + DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global") + DECLARE_HYDROGEN_ACCESSOR(LoadGlobal) }; -class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> { +class LStoreGlobal: public LTemplateInstruction<0, 1, 1> { public: - LStoreGlobalCell(LOperand* value, LOperand* temp) { + LStoreGlobal(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } - DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") - DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell) -}; - - -class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> { - public: - explicit LStoreGlobalGeneric(LOperand* global_object, - LOperand* value) { - inputs_[0] = global_object; - inputs_[1] = value; - } - - DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic") - DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric) - - LOperand* global_object() { return InputAt(0); } - Handle<Object> name() const { return hydrogen()->name(); } - LOperand* value() { return InputAt(1); } - bool strict_mode() { return hydrogen()->strict_mode(); } + DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global") + DECLARE_HYDROGEN_ACCESSOR(StoreGlobal) }; @@ -1268,11 +1283,6 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> { }; -class LThisFunction: public LTemplateInstruction<1, 0, 0> { - DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") -}; - - class LContext: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Context, "context") @@ -1327,23 +1337,6 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> { }; -class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { - public: - explicit LInvokeFunction(LOperand* function) { - inputs_[0] = function; - } - - DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") - 
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - - LOperand* function() { return inputs_[0]; } - - virtual void PrintDataTo(StringStream* stream); - - int arity() const { return hydrogen()->argument_count() - 1; } -}; - - class LCallKeyed: public LTemplateInstruction<1, 1, 0> { public: explicit LCallKeyed(LOperand* key) { @@ -1425,7 +1418,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> { DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) - const Runtime::Function* function() const { return hydrogen()->function(); } + Runtime::Function* function() const { return hydrogen()->function(); } int arity() const { return hydrogen()->argument_count(); } }; @@ -1463,36 +1456,30 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> { // Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI: public LTemplateInstruction<1, 1, 2> { +class LDoubleToI: public LTemplateInstruction<1, 1, 1> { public: - LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) { + explicit LDoubleToI(LOperand* value, LOperand* temp1) { inputs_[0] = value; temps_[0] = temp1; - temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } }; // Truncating conversion from a tagged value to an int32. -class LTaggedToI: public LTemplateInstruction<1, 1, 3> { +class LTaggedToI: public LTemplateInstruction<1, 1, 1> { public: - LTaggedToI(LOperand* value, - LOperand* temp1, - LOperand* temp2, - LOperand* temp3) { + LTaggedToI(LOperand* value, LOperand* temp) { inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = temp3; + temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } }; @@ -1515,7 +1502,6 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> { } DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") - DECLARE_HYDROGEN_ACCESSOR(Change) }; @@ -1573,7 +1559,6 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } Handle<Object> name() const { return hydrogen()->name(); } - bool strict_mode() { return hydrogen()->strict_mode(); } }; @@ -1606,55 +1591,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } - bool strict_mode() { return hydrogen()->strict_mode(); } -}; - -class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key, - LOperand* val) { - inputs_[0] = external_pointer; - inputs_[1] = key; - inputs_[2] = val; - } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement, - "store-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement) - - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - JSObject::ElementsKind elements_kind() const { - 
return hydrogen()->elements_kind(); - } -}; - - -class LStringAdd: public LTemplateInstruction<1, 2, 0> { - public: - LStringAdd(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } }; - class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { public: LStringCharCodeAt(LOperand* string, LOperand* index) { @@ -1670,19 +1615,6 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { }; -class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> { - public: - explicit LStringCharFromCode(LOperand* char_code) { - inputs_[0] = char_code; - } - - DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") - DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) - - LOperand* char_code() { return inputs_[0]; } -}; - - class LStringLength: public LTemplateInstruction<1, 1, 0> { public: explicit LStringLength(LOperand* string) { @@ -1746,59 +1678,20 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { class LCheckSmi: public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckSmi(LOperand* value) { - inputs_[0] = value; - } - - DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") -}; - - -class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> { - public: - explicit LCheckNonSmi(LOperand* value) { - inputs_[0] = value; - } - - DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") -}; - - -class LClampDToUint8: public LTemplateInstruction<1, 1, 1> { - public: - LClampDToUint8(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") -}; - - -class LClampIToUint8: public LTemplateInstruction<1, 1, 0> { - public: - explicit LClampIToUint8(LOperand* value) { + LCheckSmi(LOperand* value, Condition condition) + : condition_(condition) { inputs_[0] = value; } - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8") -}; - + Condition condition() const { return condition_; } -class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { - public: - LClampTToUint8(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; + virtual void CompileToNative(LCodeGen* generator); + virtual const char* Mnemonic() const { + return (condition_ == eq) ? 
"check-non-smi" : "check-smi"; } - LOperand* unclamped() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") + private: + Condition condition_; }; @@ -1832,24 +1725,28 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { }; -class LToFastProperties: public LTemplateInstruction<1, 1, 0> { +class LTypeof: public LTemplateInstruction<1, 1, 0> { public: - explicit LToFastProperties(LOperand* value) { + explicit LTypeof(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties") - DECLARE_HYDROGEN_ACCESSOR(ToFastProperties) + DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") }; -class LTypeof: public LTemplateInstruction<1, 1, 0> { +class LTypeofIs: public LTemplateInstruction<1, 1, 0> { public: - explicit LTypeof(LOperand* value) { + explicit LTypeofIs(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") + DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is") + DECLARE_HYDROGEN_ACCESSOR(TypeofIs) + + Handle<String> type_literal() { return hydrogen()->type_literal(); } + + virtual void PrintDataTo(StringStream* stream); }; @@ -1860,7 +1757,7 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> { } DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") - DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) + DECLARE_HYDROGEN_ACCESSOR(TypeofIs) Handle<String> type_literal() { return hydrogen()->type_literal(); } @@ -1868,6 +1765,13 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> { }; +class LIsConstructCall: public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call") + DECLARE_HYDROGEN_ACCESSOR(IsConstructCall) +}; + + class LIsConstructCallAndBranch: public LControlInstruction<0, 1> { public: explicit LIsConstructCallAndBranch(LOperand* temp) { @@ -1919,33 +1823,13 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { class LStackCheck: public LTemplateInstruction<0, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") - DECLARE_HYDROGEN_ACCESSOR(StackCheck) - - Label* done_label() { return &done_label_; } - - private: - Label done_label_; -}; - - -class LIn: public LTemplateInstruction<1, 2, 0> { - public: - LIn(LOperand* key, LOperand* object) { - inputs_[0] = key; - inputs_[1] = object; - } - - LOperand* key() { return inputs_[0]; } - LOperand* object() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(In, "in") }; class LChunkBuilder; class LChunk: public ZoneObject { public: - explicit LChunk(CompilationInfo* info, HGraph* graph); + explicit LChunk(HGraph* graph); void AddInstruction(LInstruction* instruction, HBasicBlock* block); LConstantOperand* DefineConstantOperand(HConstant* constant); @@ -1958,7 +1842,6 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } - CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } void AddGapMove(int index, LOperand* from, LOperand* to); @@ -1995,7 +1878,6 @@ class LChunk: public ZoneObject { private: int spill_slot_count_; - CompilationInfo* info_; HGraph* const graph_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; @@ -2005,9 +1887,8 @@ class LChunk: public ZoneObject { class LChunkBuilder BASE_EMBEDDED { public: - LChunkBuilder(CompilationInfo* 
info, HGraph* graph, LAllocator* allocator) + LChunkBuilder(HGraph* graph, LAllocator* allocator) : chunk_(NULL), - info_(info), graph_(graph), status_(UNUSED), current_instruction_(NULL), @@ -2036,7 +1917,6 @@ class LChunkBuilder BASE_EMBEDDED { }; LChunk* chunk() const { return chunk_; } - CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } bool is_unused() const { return status_ == UNUSED; } @@ -2143,7 +2023,6 @@ class LChunkBuilder BASE_EMBEDDED { HArithmeticBinaryOperation* instr); LChunk* chunk_; - CompilationInfo* info_; HGraph* const graph_; Status status_; HInstruction* current_instruction_; @@ -2159,6 +2038,7 @@ class LChunkBuilder BASE_EMBEDDED { }; #undef DECLARE_HYDROGEN_ACCESSOR +#undef DECLARE_INSTRUCTION #undef DECLARE_CONCRETE_INSTRUCTION } } // namespace v8::internal diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 24e2044f27..1ec2b9842f 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -25,8 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" - #include "arm/lithium-codegen-arm.h" #include "arm/lithium-gap-resolver-arm.h" #include "code-stubs.h" @@ -36,7 +34,7 @@ namespace v8 { namespace internal { -class SafepointGenerator : public CallWrapper { +class SafepointGenerator : public PostCallGenerator { public: SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, @@ -46,24 +44,7 @@ class SafepointGenerator : public CallWrapper { deoptimization_index_(deoptimization_index) { } virtual ~SafepointGenerator() { } - virtual void BeforeCall(int call_size) const { - ASSERT(call_size >= 0); - // Ensure that we have enough space after the previous safepoint position - // for the generated code there. - int call_end = codegen_->masm()->pc_offset() + call_size; - int prev_jump_end = - codegen_->LastSafepointEnd() + Deoptimizer::patch_size(); - if (call_end < prev_jump_end) { - int padding_size = prev_jump_end - call_end; - ASSERT_EQ(0, padding_size % Assembler::kInstrSize); - while (padding_size > 0) { - codegen_->masm()->nop(); - padding_size -= Assembler::kInstrSize; - } - } - } - - virtual void AfterCall() const { + virtual void Generate() { codegen_->RecordSafepoint(pointers_, deoptimization_index_); } @@ -85,14 +66,13 @@ bool LCodeGen::GenerateCode() { return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && - GenerateDeoptJumpTable() && GenerateSafepointTable(); } void LCodeGen::FinishCode(Handle<Code> code) { ASSERT(is_done()); - code->set_stack_slots(GetStackSlotCount()); + code->set_stack_slots(StackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); PopulateDeoptimizationData(code); Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); @@ -101,8 +81,8 @@ void LCodeGen::FinishCode(Handle<Code> code) { void LCodeGen::Abort(const char* format, ...) { if (FLAG_trace_bailout) { - SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString()); - PrintF("Aborting LCodeGen in @\"%s\": ", *name); + SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); + PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); va_list arguments; va_start(arguments, format); OS::VPrint(format, arguments); @@ -146,25 +126,11 @@ bool LCodeGen::GeneratePrologue() { // fp: Caller's frame pointer. // lr: Caller's pc. 
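
// ---------------------------------------------------------------------------
// A standalone sketch (not part of the diff) of the padding rule in the
// removed SafepointGenerator::BeforeCall above: if the upcoming call would
// end before the previous safepoint's patch area does, emit nops until the
// two regions can no longer overlap.  Sizes are in bytes; PaddingBytes and
// the literal offsets below are illustrative.
#include <cassert>
#include <cstdio>

static const int kInstrSize = 4;       // ARM instructions are 4 bytes

static int PaddingBytes(int pc_offset, int call_size, int prev_jump_end) {
  int call_end = pc_offset + call_size;
  int padding = (call_end < prev_jump_end) ? prev_jump_end - call_end : 0;
  assert(padding % kInstrSize == 0);   // pad with whole instructions only
  return padding;                      // emit padding / kInstrSize nops
}

int main() {
  std::printf("%d %d\n",
              PaddingBytes(100, 8, 120),    // 12 -> three nops
              PaddingBytes(100, 32, 120));  //  0 -> no padding needed
  return 0;
}
// ---------------------------------------------------------------------------
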
- // Strict mode functions and builtins need to replace the receiver - // with undefined when called as functions (without an explicit - // receiver object). r5 is zero for method calls and non-zero for - // function calls. - if (info_->is_strict_mode() || info_->is_native()) { - Label ok; - __ cmp(r5, Operand(0)); - __ b(eq, &ok); - int receiver_offset = scope()->num_parameters() * kPointerSize; - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ str(r2, MemOperand(sp, receiver_offset)); - __ bind(&ok); - } - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. // Reserve space for the stack slots needed by the code. - int slots = GetStackSlotCount(); + int slots = StackSlotCount(); if (slots > 0) { if (FLAG_debug_code) { __ mov(r0, Operand(slots)); @@ -189,7 +155,7 @@ bool LCodeGen::GeneratePrologue() { FastNewContextStub stub(heap_slots); __ CallStub(&stub); } else { - __ CallRuntime(Runtime::kNewFunctionContext, 1); + __ CallRuntime(Runtime::kNewContext, 1); } RecordSafepoint(Safepoint::kNoDeoptimizationIndex); // Context is returned in both r0 and cp. It replaces the context @@ -264,43 +230,13 @@ bool LCodeGen::GenerateDeferredCode() { __ jmp(code->exit()); } - // Force constant pool emission at the end of the deferred code to make - // sure that no constant pools are emitted after. + // Force constant pool emission at the end of deferred code to make + // sure that no constant pools are emitted after the official end of + // the instruction sequence. masm()->CheckConstPool(true, false); - return !is_aborted(); -} - - -bool LCodeGen::GenerateDeoptJumpTable() { - // Check that the jump table is accessible from everywhere in the function - // code, ie that offsets to the table can be encoded in the 24bit signed - // immediate of a branch instruction. - // To simplify we consider the code size from the first instruction to the - // end of the jump table. We also don't consider the pc load delta. - // Each entry in the jump table generates one instruction and inlines one - // 32bit data after it. - if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + - deopt_jump_table_.length() * 2)) { - Abort("Generated code is too large"); - } - - // Block the constant pool emission during the jump table emission. - __ BlockConstPoolFor(deopt_jump_table_.length()); - __ RecordComment("[ Deoptimisation jump table"); - Label table_start; - __ bind(&table_start); - for (int i = 0; i < deopt_jump_table_.length(); i++) { - __ bind(&deopt_jump_table_[i].label); - __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); - __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); - } - ASSERT(masm()->InstructionsGeneratedSince(&table_start) == - deopt_jump_table_.length() * 2); - __ RecordComment("]"); - - // The deoptimization jump table is the last part of the instruction - // sequence. Mark the generated code as done unless we bailed out. + // Deferred code is the last part of the instruction sequence. Mark + // the generated code as done unless we bailed out. 
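
// ---------------------------------------------------------------------------
// A standalone sketch (not part of the diff) of the size check guarding the
// removed deoptimization jump table above: the table must stay within reach
// of an ARM branch, whose offset is a 24-bit signed count of instructions,
// and every table entry costs two instruction slots (a load plus an inlined
// 32-bit address).  JumpTableFits is an illustrative name, not V8 code.
#include <cstdio>

static const int kInstrSize = 4;

static bool is_int24(long value) {
  return value >= -(1L << 23) && value < (1L << 23);
}

static bool JumpTableFits(int pc_offset_bytes, int table_entries) {
  return is_int24(pc_offset_bytes / kInstrSize + table_entries * 2L);
}

int main() {
  std::printf("%d %d\n",
              JumpTableFits(1 << 20, 100),          // 1: comfortably in range
              JumpTableFits(40 * 1024 * 1024, 1));  // 0: code too large
  return 0;
}
// ---------------------------------------------------------------------------
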
if (!is_aborted()) status_ = DONE; return !is_aborted(); } @@ -308,7 +244,7 @@ bool LCodeGen::GenerateDeoptJumpTable() { bool LCodeGen::GenerateSafepointTable() { ASSERT(is_done()); - safepoints_.Emit(masm(), GetStackSlotCount()); + safepoints_.Emit(masm(), StackSlotCount()); return !is_aborted(); } @@ -504,7 +440,7 @@ void LCodeGen::AddToTranslation(Translation* translation, translation->StoreDoubleStackSlot(op->index()); } else if (op->IsArgument()) { ASSERT(is_tagged); - int src_index = GetStackSlotCount() + op->index(); + int src_index = StackSlotCount() + op->index(); translation->StoreStackSlot(src_index); } else if (op->IsRegister()) { Register reg = ToRegister(op); @@ -545,7 +481,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, } -void LCodeGen::CallRuntime(const Runtime::Function* function, +void LCodeGen::CallRuntime(Runtime::Function* function, int num_arguments, LInstruction* instr) { ASSERT(instr != NULL); @@ -640,18 +576,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { return; } - if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); - if (cc == al) { + if (FLAG_trap_on_deopt) __ stop("trap_on_deopt"); __ Jump(entry, RelocInfo::RUNTIME_ENTRY); } else { - // We often have several deopts to the same entry, reuse the last - // jump entry if this is the case. - if (deopt_jump_table_.is_empty() || - (deopt_jump_table_.last().address != entry)) { - deopt_jump_table_.Add(JumpTableEntry(entry)); + if (FLAG_trap_on_deopt) { + Label done; + __ b(&done, NegateCondition(cc)); + __ stop("trap_on_deopt"); + __ Jump(entry, RelocInfo::RUNTIME_ENTRY); + __ bind(&done); + } else { + __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc); } - __ b(cc, &deopt_jump_table_.last().label); } } @@ -661,14 +598,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { if (length == 0) return; ASSERT(FLAG_deopt); Handle<DeoptimizationInputData> data = - factory()->NewDeoptimizationInputData(length, TENURED); + Factory::NewDeoptimizationInputData(length, TENURED); Handle<ByteArray> translations = translations_.CreateByteArray(); data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); Handle<FixedArray> literals = - factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); + Factory::NewFixedArray(deoptimization_literals_.length(), TENURED); for (int i = 0; i < deoptimization_literals_.length(); i++) { literals->set(i, *deoptimization_literals_[i]); } @@ -770,7 +707,7 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles( void LCodeGen::RecordPosition(int position) { - if (position == RelocInfo::kNoPosition) return; + if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); } @@ -783,7 +720,7 @@ void LCodeGen::DoLabel(LLabel* label) { } __ bind(label->label()); current_block_ = label->block_id(); - DoGap(label); + LCodeGen::DoGap(label); } @@ -809,11 +746,6 @@ void LCodeGen::DoGap(LGap* gap) { } -void LCodeGen::DoInstructionGap(LInstructionGap* instr) { - DoGap(instr); -} - - void LCodeGen::DoParameter(LParameter* instr) { // Nothing to do. 
} @@ -837,6 +769,15 @@ void LCodeGen::DoCallStub(LCallStub* instr) { CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } + case CodeStub::StringCharAt: { + StringCharAtStub stub; + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + break; + } + case CodeStub::MathPow: { + Abort("MathPowStub unimplemented."); + break; + } case CodeStub::NumberToString: { NumberToStringStub stub; CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); @@ -854,8 +795,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) { } case CodeStub::TranscendentalCache: { __ ldr(r0, MemOperand(sp, 0)); - TranscendentalCacheStub stub(instr->transcendental_type(), - TranscendentalCacheStub::TAGGED); + TranscendentalCacheStub stub(instr->transcendental_type()); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); break; } @@ -871,92 +811,55 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { void LCodeGen::DoModI(LModI* instr) { - if (instr->hydrogen()->HasPowerOf2Divisor()) { - Register dividend = ToRegister(instr->InputAt(0)); - Register result = ToRegister(instr->result()); - - int32_t divisor = - HConstant::cast(instr->hydrogen()->right())->Integer32Value(); - - if (divisor < 0) divisor = -divisor; - - Label positive_dividend, done; - __ cmp(dividend, Operand(0)); - __ b(pl, &positive_dividend); - __ rsb(result, dividend, Operand(0)); - __ and_(result, result, Operand(divisor - 1), SetCC); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr->environment()); + class DeferredModI: public LDeferredCode { + public: + DeferredModI(LCodeGen* codegen, LModI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { + codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD); } - __ rsb(result, result, Operand(0)); - __ b(&done); - __ bind(&positive_dividend); - __ and_(result, dividend, Operand(divisor - 1)); - __ bind(&done); - return; - } - + private: + LModI* instr_; + }; // These registers hold untagged 32 bit values. Register left = ToRegister(instr->InputAt(0)); Register right = ToRegister(instr->InputAt(1)); Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); - DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1)); - DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2)); - DwVfpRegister quotient = double_scratch0(); - - ASSERT(!dividend.is(divisor)); - ASSERT(!dividend.is(quotient)); - ASSERT(!divisor.is(quotient)); - ASSERT(!scratch.is(left)); - ASSERT(!scratch.is(right)); - ASSERT(!scratch.is(result)); - - Label done, vfp_modulo, both_positive, right_negative; + Label deoptimize, done; // Check for x % 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand(0)); - DeoptimizeIf(eq, instr->environment()); + __ tst(right, Operand(right)); + __ b(eq, &deoptimize); } - __ Move(result, left); - - // (0 % x) must yield 0 (if x is finite, which is the case here). - __ cmp(left, Operand(0)); - __ b(eq, &done); - // Preload right in a vfp register. - __ vmov(divisor.low(), right); - __ b(lt, &vfp_modulo); - - __ cmp(left, Operand(right)); - __ b(lt, &done); - - // Check for (positive) power of two on the right hand side. - __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, - scratch, - &right_negative, - &both_positive); - // Perform modulo operation (scratch contains right - 1). - __ and_(result, scratch, Operand(left)); - __ b(&done); - - __ bind(&right_negative); - // Negate right. The sign of the divisor does not matter. 
- __ rsb(right, right, Operand(0)); + // Check for (0 % -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label ok; + __ tst(left, Operand(left)); + __ b(ne, &ok); + __ tst(right, Operand(right)); + __ b(pl, &ok); + __ b(al, &deoptimize); + __ bind(&ok); + } - __ bind(&both_positive); + // Try a few common cases before using the stub. + Label call_stub; const int kUnfolds = 3; + // Skip if either side is negative. + __ cmp(left, Operand(0)); + __ cmp(right, Operand(0), NegateCondition(mi)); + __ b(mi, &call_stub); // If the right hand side is smaller than the (nonnegative) - // left hand side, the left hand side is the result. - // Else try a few subtractions of the left hand side. + // left hand side, it is the result. Else try a few subtractions + // of the left hand side. __ mov(scratch, left); for (int i = 0; i < kUnfolds; i++) { // Check if the left hand side is less or equal than the // the right hand side. - __ cmp(scratch, Operand(right)); + __ cmp(scratch, right); __ mov(result, scratch, LeaveCC, lt); __ b(lt, &done); // If not, reduce the left hand side by the right hand @@ -964,45 +867,28 @@ void LCodeGen::DoModI(LModI* instr) { if (i < kUnfolds - 1) __ sub(scratch, scratch, right); } - __ bind(&vfp_modulo); - // Load the arguments in VFP registers. - // The divisor value is preloaded before. Be careful that 'right' is only live - // on entry. - __ vmov(dividend.low(), left); - // From here on don't use right as it may have been reallocated (for example - // to scratch2). - right = no_reg; - - __ vcvt_f64_s32(dividend, dividend.low()); - __ vcvt_f64_s32(divisor, divisor.low()); - - // We do not care about the sign of the divisor. - __ vabs(divisor, divisor); - // Compute the quotient and round it to a 32bit integer. - __ vdiv(quotient, dividend, divisor); - __ vcvt_s32_f64(quotient.low(), quotient); - __ vcvt_f64_s32(quotient, quotient.low()); - - // Compute the remainder in result. - DwVfpRegister double_scratch = dividend; - __ vmul(double_scratch, divisor, quotient); - __ vcvt_s32_f64(double_scratch.low(), double_scratch); - __ vmov(scratch, double_scratch.low()); - - if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ sub(result, left, scratch); - } else { - Label ok; - // Check for -0. - __ sub(scratch2, left, scratch, SetCC); - __ b(ne, &ok); - __ cmp(left, Operand(0)); - DeoptimizeIf(mi, instr->environment()); - __ bind(&ok); - // Load the result and we are done. - __ mov(result, scratch2); - } + // Check for power of two on the right hand side. + __ JumpIfNotPowerOfTwoOrZero(right, scratch, &call_stub); + // Perform modulo operation (scratch contains right - 1). + __ and_(result, scratch, Operand(left)); + + __ bind(&call_stub); + // Call the stub. The numbers in r0 and r1 have + // to be tagged to Smis. If that is not possible, deoptimize. + DeferredModI* deferred = new DeferredModI(this, instr); + __ TrySmiTag(left, &deoptimize, scratch); + __ TrySmiTag(right, &deoptimize, scratch); + + __ b(al, deferred->entry()); + __ bind(deferred->exit()); + + // If the result in r0 is a Smi, untag it, else deoptimize. + __ JumpIfNotSmi(result, &deoptimize); + __ SmiUntag(result); + __ b(al, &done); + __ bind(&deoptimize); + DeoptimizeIf(al, instr->environment()); __ bind(&done); } @@ -1026,16 +912,16 @@ void LCodeGen::DoDivI(LDivI* instr) { // Check for x / 0. 
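
// ---------------------------------------------------------------------------
// A standalone sketch (not part of the diff) of the VFP fallback in the
// removed DoModI code above: take |divisor| (vabs), form a quotient rounded
// toward zero (vdiv + vcvt), and recover the remainder as
// dividend - |divisor| * quotient, so the result keeps the dividend's sign.
// FloatingPointMod is an illustrative name; the -0 bailout is not modelled.
#include <cmath>
#include <cstdio>

static int FloatingPointMod(int left, int right) {
  double dividend = left;
  double divisor = std::fabs(static_cast<double>(right));  // vabs
  double quotient = std::trunc(dividend / divisor);         // vdiv + vcvt (to zero)
  return static_cast<int>(dividend - divisor * quotient);   // vmul + sub
}

int main() {
  std::printf("%d %d %d\n",
              FloatingPointMod(13, 5),    //  3
              FloatingPointMod(-13, 5),   // -3
              FloatingPointMod(13, -5));  //  3
  return 0;
}
// ---------------------------------------------------------------------------
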
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand(0)); + __ tst(right, right); DeoptimizeIf(eq, instr->environment()); } // Check for (0 / -x) that will produce negative zero. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { Label left_not_zero; - __ cmp(left, Operand(0)); + __ tst(left, Operand(left)); __ b(ne, &left_not_zero); - __ cmp(right, Operand(0)); + __ tst(right, Operand(right)); DeoptimizeIf(mi, instr->environment()); __ bind(&left_not_zero); } @@ -1108,7 +994,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, __ mov(r0, right); __ mov(r1, left); } - BinaryOpStub stub(op, OVERWRITE_LEFT); + TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT); __ CallStub(&stub); RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), 0, @@ -1120,125 +1006,59 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, void LCodeGen::DoMulI(LMulI* instr) { Register scratch = scratch0(); - Register result = ToRegister(instr->result()); - // Note that result may alias left. Register left = ToRegister(instr->InputAt(0)); - LOperand* right_op = instr->InputAt(1); + Register right = EmitLoadRegister(instr->InputAt(1), scratch); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - bool bailout_on_minus_zero = - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); - - if (right_op->IsConstantOperand() && !can_overflow) { - // Use optimized code for specific constants. - int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); - - if (bailout_on_minus_zero && (constant < 0)) { - // The case of a null constant will be handled separately. - // If constant is negative and left is null, the result should be -0. - __ cmp(left, Operand(0)); - DeoptimizeIf(eq, instr->environment()); - } - - switch (constant) { - case -1: - __ rsb(result, left, Operand(0)); - break; - case 0: - if (bailout_on_minus_zero) { - // If left is strictly negative and the constant is null, the - // result is -0. Deoptimize if required, otherwise return 0. - __ cmp(left, Operand(0)); - DeoptimizeIf(mi, instr->environment()); - } - __ mov(result, Operand(0)); - break; - case 1: - __ Move(result, left); - break; - default: - // Multiplying by powers of two and powers of two plus or minus - // one can be done faster with shifted operands. - // For other constants we emit standard code. - int32_t mask = constant >> 31; - uint32_t constant_abs = (constant + mask) ^ mask; - - if (IsPowerOf2(constant_abs) || - IsPowerOf2(constant_abs - 1) || - IsPowerOf2(constant_abs + 1)) { - if (IsPowerOf2(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ mov(result, Operand(left, LSL, shift)); - } else if (IsPowerOf2(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ add(result, left, Operand(left, LSL, shift)); - } else if (IsPowerOf2(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ rsb(result, left, Operand(left, LSL, shift)); - } - - // Correct the sign of the result is the constant is negative. - if (constant < 0) __ rsb(result, result, Operand(0)); - - } else { - // Generate standard code. - __ mov(ip, Operand(constant)); - __ mul(result, left, ip); - } - } + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) && + !instr->InputAt(1)->IsConstantOperand()) { + __ orr(ToRegister(instr->TempAt(0)), left, right); + } + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + // scratch:left = left * right. 
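
// ---------------------------------------------------------------------------
// A standalone sketch (not part of the diff) of the shift-based constant
// multiply in the removed DoMulI code above: when |c|, |c| - 1 or |c| + 1 is
// a power of two, x * c becomes a shift, or a shift plus or minus x, with a
// final negation for negative constants.  MulByConstant and the helper
// functions are illustrative names, not V8 code.
#include <cstdint>
#include <cstdio>

static bool IsPowerOf2(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

static int WhichPowerOf2(uint32_t x) {
  int shift = 0;
  while ((x >>= 1) != 0) shift++;
  return shift;
}

static int32_t MulByConstant(int32_t x, int32_t constant) {
  int32_t mask = constant >> 31;                      // 0 or -1
  uint32_t constant_abs = (constant + mask) ^ mask;   // |constant|, branch-free
  int32_t result;
  if (IsPowerOf2(constant_abs)) {
    result = x << WhichPowerOf2(constant_abs);              // x * 2^k
  } else if (IsPowerOf2(constant_abs - 1)) {
    result = x + (x << WhichPowerOf2(constant_abs - 1));    // x * (2^k + 1)
  } else if (IsPowerOf2(constant_abs + 1)) {
    result = (x << WhichPowerOf2(constant_abs + 1)) - x;    // x * (2^k - 1)
  } else {
    result = x * static_cast<int32_t>(constant_abs);        // plain multiply
  }
  return constant < 0 ? -result : result;                   // fix the sign
}

int main() {
  std::printf("%d %d %d\n",
              MulByConstant(7, 8),     //  56
              MulByConstant(7, 9),     //  63
              MulByConstant(7, -3));   // -21
  return 0;
}
// ---------------------------------------------------------------------------
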
+ __ smull(left, scratch, left, right); + __ mov(ip, Operand(left, ASR, 31)); + __ cmp(ip, Operand(scratch)); + DeoptimizeIf(ne, instr->environment()); } else { - Register right = EmitLoadRegister(right_op, scratch); - if (bailout_on_minus_zero) { - __ orr(ToRegister(instr->TempAt(0)), left, right); - } + __ mul(left, left, right); + } - if (can_overflow) { - // scratch:result = left * right. - __ smull(result, scratch, left, right); - __ cmp(scratch, Operand(result, ASR, 31)); - DeoptimizeIf(ne, instr->environment()); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + // Bail out if the result is supposed to be negative zero. + Label done; + __ tst(left, Operand(left)); + __ b(ne, &done); + if (instr->InputAt(1)->IsConstantOperand()) { + if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) { + DeoptimizeIf(al, instr->environment()); + } } else { - __ mul(result, left, right); - } - - if (bailout_on_minus_zero) { - // Bail out if the result is supposed to be negative zero. - Label done; - __ cmp(result, Operand(0)); - __ b(ne, &done); + // Test the non-zero operand for negative sign. __ cmp(ToRegister(instr->TempAt(0)), Operand(0)); DeoptimizeIf(mi, instr->environment()); - __ bind(&done); } + __ bind(&done); } } void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->InputAt(0); - LOperand* right_op = instr->InputAt(1); - ASSERT(left_op->IsRegister()); - Register left = ToRegister(left_op); - Register result = ToRegister(instr->result()); - Operand right(no_reg); - - if (right_op->IsStackSlot() || right_op->IsArgument()) { - right = Operand(EmitLoadRegister(right_op, ip)); - } else { - ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); - right = ToOperand(right_op); - } - + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + ASSERT(left->Equals(instr->result())); + ASSERT(left->IsRegister()); + Register result = ToRegister(left); + Register right_reg = EmitLoadRegister(right, ip); switch (instr->op()) { case Token::BIT_AND: - __ and_(result, left, right); + __ and_(result, ToRegister(left), Operand(right_reg)); break; case Token::BIT_OR: - __ orr(result, left, right); + __ orr(result, ToRegister(left), Operand(right_reg)); break; case Token::BIT_XOR: - __ eor(result, left, right); + __ eor(result, ToRegister(left), Operand(right_reg)); break; default: UNREACHABLE(); @@ -1248,62 +1068,54 @@ void LCodeGen::DoBitI(LBitI* instr) { void LCodeGen::DoShiftI(LShiftI* instr) { - // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so - // result may alias either of them. - LOperand* right_op = instr->InputAt(1); - Register left = ToRegister(instr->InputAt(0)); - Register result = ToRegister(instr->result()); Register scratch = scratch0(); - if (right_op->IsRegister()) { - // Mask the right_op operand. - __ and_(scratch, ToRegister(right_op), Operand(0x1F)); + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + ASSERT(left->Equals(instr->result())); + ASSERT(left->IsRegister()); + Register result = ToRegister(left); + if (right->IsRegister()) { + // Mask the right operand. 
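// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): why the shift code below masks the count with 0x1F and why SHR may
// deoptimize. JavaScript takes shift counts modulo 32, and a >>> result with
// the top bit set does not fit in a signed 32-bit integer, which the SetCC/mi
// path and the tst against 0x80000000 detect.
#include <cstdint>

uint32_t ShrJs(uint32_t value, int32_t count) {
  return value >> (count & 0x1F);      // count taken mod 32, as with the mask
}

bool FitsInInt32(uint32_t value) {
  return (value & 0x80000000u) == 0;   // top bit clear => representable as int32
}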
+    __ and_(scratch, ToRegister(right), Operand(0x1F));
     switch (instr->op()) {
       case Token::SAR:
-        __ mov(result, Operand(left, ASR, scratch));
+        __ mov(result, Operand(result, ASR, scratch));
         break;
       case Token::SHR:
         if (instr->can_deopt()) {
-          __ mov(result, Operand(left, LSR, scratch), SetCC);
+          __ mov(result, Operand(result, LSR, scratch), SetCC);
           DeoptimizeIf(mi, instr->environment());
         } else {
-          __ mov(result, Operand(left, LSR, scratch));
+          __ mov(result, Operand(result, LSR, scratch));
         }
         break;
       case Token::SHL:
-        __ mov(result, Operand(left, LSL, scratch));
+        __ mov(result, Operand(result, LSL, scratch));
         break;
       default:
         UNREACHABLE();
         break;
     }
   } else {
-    // Mask the right_op operand.
-    int value = ToInteger32(LConstantOperand::cast(right_op));
+    int value = ToInteger32(LConstantOperand::cast(right));
     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
     switch (instr->op()) {
       case Token::SAR:
         if (shift_count != 0) {
-          __ mov(result, Operand(left, ASR, shift_count));
-        } else {
-          __ Move(result, left);
+          __ mov(result, Operand(result, ASR, shift_count));
         }
         break;
       case Token::SHR:
-        if (shift_count != 0) {
-          __ mov(result, Operand(left, LSR, shift_count));
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ tst(result, Operand(0x80000000));
+          DeoptimizeIf(ne, instr->environment());
         } else {
-          if (instr->can_deopt()) {
-            __ tst(left, Operand(0x80000000));
-            DeoptimizeIf(ne, instr->environment());
-          }
-          __ Move(result, left);
+          __ mov(result, Operand(result, LSR, shift_count));
         }
         break;
       case Token::SHL:
         if (shift_count != 0) {
-          __ mov(result, Operand(left, LSL, shift_count));
-        } else {
-          __ Move(result, left);
+          __ mov(result, Operand(result, LSL, shift_count));
         }
         break;
       default:
@@ -1315,21 +1127,11 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
 void LCodeGen::DoSubI(LSubI* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  LOperand* result = instr->result();
-  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-  SBit set_cond = can_overflow ?
SetCC : LeaveCC; - - if (right->IsStackSlot() || right->IsArgument()) { - Register right_reg = EmitLoadRegister(right, ip); - __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); - } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); - __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); - } - - if (can_overflow) { + Register left = ToRegister(instr->InputAt(0)); + Register right = EmitLoadRegister(instr->InputAt(1), ip); + ASSERT(instr->InputAt(0)->Equals(instr->result())); + __ sub(left, left, right, SetCC); + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { DeoptimizeIf(vs, instr->environment()); } } @@ -1345,7 +1147,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); double v = instr->value(); - __ Vmov(result, v); + __ vmov(result, v); } @@ -1362,10 +1164,10 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { } -void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) { +void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) { Register result = ToRegister(instr->result()); Register array = ToRegister(instr->InputAt(0)); - __ ldr(result, FieldMemOperand(array, ExternalArray::kLengthOffset)); + __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset)); } @@ -1376,34 +1178,19 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) { } -void LCodeGen::DoElementsKind(LElementsKind* instr) { - Register result = ToRegister(instr->result()); - Register input = ToRegister(instr->InputAt(0)); - - // Load map into |result|. - __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset)); - // Load the map's "bit field 2" into |result|. We only need the first byte, - // but the following bit field extraction takes care of that anyway. - __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset)); - // Retrieve elements_kind from bit field 2. - __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); -} - - void LCodeGen::DoValueOf(LValueOf* instr) { Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); Register map = ToRegister(instr->TempAt(0)); + ASSERT(input.is(result)); Label done; // If the object is a smi return the object. __ tst(input, Operand(kSmiTagMask)); - __ Move(result, input, eq); __ b(eq, &done); // If the object is not a value type, return the object. __ CompareObjectType(input, map, map, JS_VALUE_TYPE); - __ Move(result, input, ne); __ b(ne, &done); __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset)); @@ -1412,9 +1199,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) { void LCodeGen::DoBitNotI(LBitNotI* instr) { - Register input = ToRegister(instr->InputAt(0)); - Register result = ToRegister(instr->result()); - __ mvn(result, Operand(input)); + LOperand* input = instr->InputAt(0); + ASSERT(input->Equals(instr->result())); + __ mvn(ToRegister(input), Operand(ToRegister(input))); } @@ -1432,19 +1219,12 @@ void LCodeGen::DoThrow(LThrow* instr) { void LCodeGen::DoAddI(LAddI* instr) { LOperand* left = instr->InputAt(0); LOperand* right = instr->InputAt(1); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - SBit set_cond = can_overflow ? 
SetCC : LeaveCC; + ASSERT(left->Equals(instr->result())); - if (right->IsStackSlot() || right->IsArgument()) { - Register right_reg = EmitLoadRegister(right, ip); - __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); - } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); - __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); - } + Register right_reg = EmitLoadRegister(right, ip); + __ add(ToRegister(left), ToRegister(left), Operand(right_reg), SetCC); - if (can_overflow) { + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { DeoptimizeIf(vs, instr->environment()); } } @@ -1453,31 +1233,29 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { DoubleRegister left = ToDoubleRegister(instr->InputAt(0)); DoubleRegister right = ToDoubleRegister(instr->InputAt(1)); - DoubleRegister result = ToDoubleRegister(instr->result()); switch (instr->op()) { case Token::ADD: - __ vadd(result, left, right); + __ vadd(left, left, right); break; case Token::SUB: - __ vsub(result, left, right); + __ vsub(left, left, right); break; case Token::MUL: - __ vmul(result, left, right); + __ vmul(left, left, right); break; case Token::DIV: - __ vdiv(result, left, right); + __ vdiv(left, left, right); break; case Token::MOD: { // Save r0-r3 on the stack. __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); - __ PrepareCallCFunction(0, 2, scratch0()); - __ SetCallCDoubleArguments(left, right); - __ CallCFunction( - ExternalReference::double_fp_operation(Token::MOD, isolate()), - 0, 2); + __ PrepareCallCFunction(4, scratch0()); + __ vmov(r0, r1, left); + __ vmov(r2, r3, right); + __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); // Move the result in the double result register. - __ GetCFunctionDoubleResult(result); + __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result())); // Restore r0-r3. __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); @@ -1495,7 +1273,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->InputAt(1)).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); - BinaryOpStub stub(instr->op(), NO_OVERWRITE); + TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -1531,7 +1309,7 @@ void LCodeGen::DoBranch(LBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Representation r = instr->hydrogen()->value()->representation(); + Representation r = instr->hydrogen()->representation(); if (r.IsInteger32()) { Register reg = ToRegister(instr->InputAt(0)); __ cmp(reg, Operand(0)); @@ -1547,7 +1325,7 @@ void LCodeGen::DoBranch(LBranch* instr) { } else { ASSERT(r.IsTagged()); Register reg = ToRegister(instr->InputAt(0)); - if (instr->hydrogen()->value()->type().IsBoolean()) { + if (instr->hydrogen()->type().IsBoolean()) { __ LoadRoot(ip, Heap::kTrueValueRootIndex); __ cmp(reg, ip); EmitBranch(true_block, false_block, eq); @@ -1566,11 +1344,12 @@ void LCodeGen::DoBranch(LBranch* instr) { __ b(eq, false_label); __ cmp(reg, Operand(0)); __ b(eq, false_label); - __ JumpIfSmi(reg, true_label); + __ tst(reg, Operand(kSmiTagMask)); + __ b(eq, true_label); // Test double values. Zero and NaN are false. 
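// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): the double-to-boolean rule the branch code below implements --
// +0, -0 and NaN are false, every other heap-number value is true.
#include <cmath>

bool HeapNumberToBoolean(double value) {
  return value != 0.0 && !std::isnan(value);  // -0.0 == 0.0, so -0 is false
}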
Label call_stub; - DoubleRegister dbl_scratch = double_scratch0(); + DoubleRegister dbl_scratch = d0; Register scratch = scratch0(); __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); @@ -1598,17 +1377,45 @@ void LCodeGen::DoBranch(LBranch* instr) { } -void LCodeGen::EmitGoto(int block) { +void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) { block = chunk_->LookupDestination(block); int next_block = GetNextEmittedBlock(current_block_); if (block != next_block) { - __ jmp(chunk_->GetAssemblyLabel(block)); + // Perform stack overflow check if this goto needs it before jumping. + if (deferred_stack_check != NULL) { + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, chunk_->GetAssemblyLabel(block)); + __ jmp(deferred_stack_check->entry()); + deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block)); + } else { + __ jmp(chunk_->GetAssemblyLabel(block)); + } } } +void LCodeGen::DoDeferredStackCheck(LGoto* instr) { + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr); +} + + void LCodeGen::DoGoto(LGoto* instr) { - EmitGoto(instr->block_id()); + class DeferredStackCheck: public LDeferredCode { + public: + DeferredStackCheck(LCodeGen* codegen, LGoto* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } + private: + LGoto* instr_; + }; + + DeferredStackCheck* deferred = NULL; + if (instr->include_stack_check()) { + deferred = new DeferredStackCheck(this, instr); + } + EmitGoto(instr->block_id(), deferred); } @@ -1645,6 +1452,34 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { } +void LCodeGen::DoCmpID(LCmpID* instr) { + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + LOperand* result = instr->result(); + Register scratch = scratch0(); + + Label unordered, done; + if (instr->is_double()) { + // Compare left and right as doubles and load the + // resulting flags into the normal status register. + __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); + // If a NaN is involved, i.e. the result is unordered (V set), + // jump to unordered to return false. 
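// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): the unordered case handled just below. A VFP compare involving NaN
// sets the V flag, and every ordered relation with a NaN operand is false,
// so the code loads the false value.
#include <cmath>

bool DoubleLessThan(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return false;  // unordered
  return left < right;
}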
+ __ b(vs, &unordered); + } else { + EmitCmpI(left, right); + } + + Condition cc = TokenToCondition(instr->op(), instr->is_double()); + __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex); + __ b(cc, &done); + + __ bind(&unordered); + __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex); + __ bind(&done); +} + + void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { LOperand* left = instr->InputAt(0); LOperand* right = instr->InputAt(1); @@ -1667,27 +1502,62 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { } -void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { +void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { Register left = ToRegister(instr->InputAt(0)); Register right = ToRegister(instr->InputAt(1)); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - int true_block = chunk_->LookupDestination(instr->true_block_id()); + Register result = ToRegister(instr->result()); __ cmp(left, Operand(right)); - EmitBranch(true_block, false_block, eq); + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); } -void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { +void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) { Register left = ToRegister(instr->InputAt(0)); - int true_block = chunk_->LookupDestination(instr->true_block_id()); + Register right = ToRegister(instr->InputAt(1)); int false_block = chunk_->LookupDestination(instr->false_block_id()); + int true_block = chunk_->LookupDestination(instr->true_block_id()); - __ cmp(left, Operand(instr->hydrogen()->right())); + __ cmp(left, Operand(right)); EmitBranch(true_block, false_block, eq); } +void LCodeGen::DoIsNull(LIsNull* instr) { + Register reg = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(reg, ip); + if (instr->is_strict()) { + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); + } else { + Label true_value, false_value, done; + __ b(eq, &true_value); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(ip, reg); + __ b(eq, &true_value); + __ tst(reg, Operand(kSmiTagMask)); + __ b(eq, &false_value); + // Check for undetectable objects by looking in the bit field in + // the map. The object has already been smi checked. + Register scratch = result; + __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); + __ tst(scratch, Operand(1 << Map::kIsUndetectable)); + __ b(ne, &true_value); + __ bind(&false_value); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ jmp(&done); + __ bind(&true_value); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + __ bind(&done); + } +} + + void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { Register scratch = scratch0(); Register reg = ToRegister(instr->InputAt(0)); @@ -1709,7 +1579,8 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(reg, ip); __ b(eq, true_label); - __ JumpIfSmi(reg, false_label); + __ tst(reg, Operand(kSmiTagMask)); + __ b(eq, false_label); // Check for undetectable objects by looking in the bit field in // the map. The object has already been smi checked. 
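// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): the undetectable-object test performed right below -- load the map's
// bit field byte and test the bit selected by 1 << Map::kIsUndetectable
// (the mask value is taken as a parameter here rather than guessed).
#include <cstdint>

bool IsUndetectable(uint8_t map_bit_field, uint8_t is_undetectable_mask) {
  return (map_bit_field & is_undetectable_mask) != 0;
}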
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); @@ -1722,13 +1593,13 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { Condition LCodeGen::EmitIsObject(Register input, Register temp1, + Register temp2, Label* is_not_object, Label* is_object) { - Register temp2 = scratch0(); __ JumpIfSmi(input, is_not_object); - __ LoadRoot(temp2, Heap::kNullValueRootIndex); - __ cmp(input, temp2); + __ LoadRoot(temp1, Heap::kNullValueRootIndex); + __ cmp(input, temp1); __ b(eq, is_object); // Load map. @@ -1740,13 +1611,33 @@ Condition LCodeGen::EmitIsObject(Register input, // Load instance type and check that it is in object type range. __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); - __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, is_not_object); - __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE)); return le; } +void LCodeGen::DoIsObject(LIsObject* instr) { + Register reg = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register temp = scratch0(); + Label is_false, is_true, done; + + Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true); + __ b(true_cond, &is_true); + + __ bind(&is_false); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ b(&done); + + __ bind(&is_true); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + + __ bind(&done); +} + + void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { Register reg = ToRegister(instr->InputAt(0)); Register temp1 = ToRegister(instr->TempAt(0)); @@ -1758,38 +1649,36 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { Label* false_label = chunk_->GetAssemblyLabel(false_block); Condition true_cond = - EmitIsObject(reg, temp1, false_label, true_label); + EmitIsObject(reg, temp1, temp2, false_label, true_label); EmitBranch(true_block, false_block, true_cond); } -void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { - int true_block = chunk_->LookupDestination(instr->true_block_id()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - +void LCodeGen::DoIsSmi(LIsSmi* instr) { + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + Register result = ToRegister(instr->result()); Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); __ tst(input_reg, Operand(kSmiTagMask)); - EmitBranch(true_block, false_block, eq); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + Label done; + __ b(eq, &done); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ bind(&done); } -void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->InputAt(0)); - Register temp = ToRegister(instr->TempAt(0)); - +void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block)); - __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); - __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - __ tst(temp, Operand(1 << Map::kIsUndetectable)); - EmitBranch(true_block, false_block, ne); + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); + __ tst(input_reg, Operand(kSmiTagMask)); + EmitBranch(true_block, false_block, eq); } -static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { +static InstanceType 
TestType(HHasInstanceType* instr) { InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == FIRST_TYPE) return to; @@ -1798,7 +1687,7 @@ static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { } -static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { +static Condition BranchCondition(HHasInstanceType* instr) { InstanceType from = instr->from(); InstanceType to = instr->to(); if (from == to) return eq; @@ -1809,6 +1698,23 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { } +void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + Label done; + __ tst(input, Operand(kSmiTagMask)); + __ LoadRoot(result, Heap::kFalseValueRootIndex, eq); + __ b(eq, &done); + __ CompareObjectType(input, result, result, TestType(instr->hydrogen())); + Condition cond = BranchCondition(instr->hydrogen()); + __ LoadRoot(result, Heap::kTrueValueRootIndex, cond); + __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond)); + __ bind(&done); +} + + void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { Register scratch = scratch0(); Register input = ToRegister(instr->InputAt(0)); @@ -1818,7 +1724,8 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { Label* false_label = chunk_->GetAssemblyLabel(false_block); - __ JumpIfSmi(input, false_label); + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, false_label); __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen())); @@ -1828,13 +1735,24 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); + Register scratch = scratch0(); - if (FLAG_debug_code) { - __ AbortIfNotString(input); - } + __ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(scratch, result); +} + + +void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); - __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); - __ IndexFromHash(result, result); + ASSERT(instr->hydrogen()->value()->representation().IsTagged()); + __ ldr(scratch, + FieldMemOperand(input, String::kHashFieldOffset)); + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne); } @@ -1863,28 +1781,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Register temp2) { ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. - __ JumpIfSmi(input, is_false); - __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, is_false); + __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE); __ b(lt, is_false); // Map is now in temp. // Functions have class 'Function'. 
- __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE); + __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - __ b(ge, is_true); + __ b(eq, is_true); } else { - __ b(ge, is_false); + __ b(eq, is_false); } // Check if the constructor in the map is a function. __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); // Objects with a non-function constructor have class 'Object'. __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); @@ -1910,6 +1828,27 @@ void LCodeGen::EmitClassOfTest(Label* is_true, } +void LCodeGen::DoClassOfTest(LClassOfTest* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + ASSERT(input.is(result)); + Handle<String> class_name = instr->hydrogen()->class_name(); + + Label done, is_true, is_false; + + EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input); + __ b(ne, &is_false); + + __ bind(&is_true); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + __ jmp(&done); + + __ bind(&is_false); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ bind(&done); +} + + void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { Register input = ToRegister(instr->InputAt(0)); Register temp = scratch0(); @@ -1947,9 +1886,24 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { InstanceofStub stub(InstanceofStub::kArgsInRegisters); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ cmp(r0, Operand(0)); - __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); - __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); + Label true_value, done; + __ tst(r0, r0); + __ mov(r0, Operand(Factory::false_value()), LeaveCC, ne); + __ mov(r0, Operand(Factory::true_value()), LeaveCC, eq); +} + + +void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { + ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0. + ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1. + + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + InstanceofStub stub(InstanceofStub::kArgsInRegisters); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ tst(r0, Operand(r0)); + EmitBranch(true_block, false_block, eq); } @@ -1994,13 +1948,13 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. - __ mov(ip, Operand(factory()->the_hole_value())); + __ mov(ip, Operand(Factory::the_hole_value())); __ cmp(map, Operand(ip)); __ b(ne, &cache_miss); // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch // with true or false. 
- __ mov(result, Operand(factory()->the_hole_value())); + __ mov(result, Operand(Factory::the_hole_value())); __ b(&done); // The inlined call site cache did not match. Check null and string before @@ -2107,6 +2061,25 @@ void LCodeGen::DoCmpT(LCmpT* instr) { } +void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) { + Token::Value op = instr->op(); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + + Handle<Code> ic = CompareIC::GetUninitialized(op); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + // The compare stub expects compare condition and the input operands + // reversed for GT and LTE. + Condition condition = ComputeCompareCondition(op); + if (op == Token::GT || op == Token::LTE) { + condition = ReverseCondition(condition); + } + __ cmp(r0, Operand(0)); + EmitBranch(true_block, false_block, condition); +} + + void LCodeGen::DoReturn(LReturn* instr) { if (FLAG_trace) { // Push the return value on the stack as the parameter. @@ -2114,7 +2087,7 @@ void LCodeGen::DoReturn(LReturn* instr) { __ push(r0); __ CallRuntime(Runtime::kTraceExit, 1); } - int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; + int32_t sp_delta = (ParameterCount() + 1) * kPointerSize; __ mov(sp, fp); __ ldm(ia_w, sp, fp.bit() | lr.bit()); __ add(sp, sp, Operand(sp_delta)); @@ -2122,7 +2095,7 @@ void LCodeGen::DoReturn(LReturn* instr) { } -void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { +void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) { Register result = ToRegister(instr->result()); __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); @@ -2134,19 +2107,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { } -void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { - ASSERT(ToRegister(instr->global_object()).is(r0)); - ASSERT(ToRegister(instr->result()).is(r0)); - - __ mov(r2, Operand(instr->name())); - RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET - : RelocInfo::CODE_TARGET_CONTEXT; - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, mode, instr); -} - - -void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { +void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { Register value = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); @@ -2171,18 +2132,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { } -void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { - ASSERT(ToRegister(instr->global_object()).is(r1)); - ASSERT(ToRegister(instr->value()).is(r0)); - - __ mov(r2, Operand(instr->name())); - Handle<Code> ic = instr->strict_mode() - ? 
isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); -} - - void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); @@ -2213,83 +2162,13 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } -void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, - Register object, - Handle<Map> type, - Handle<String> name) { - LookupResult lookup; - type->LookupInDescriptors(NULL, *name, &lookup); - ASSERT(lookup.IsProperty() && - (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION)); - if (lookup.type() == FIELD) { - int index = lookup.GetLocalFieldIndexFromMap(*type); - int offset = index * kPointerSize; - if (index < 0) { - // Negative property indices are in-object properties, indexed - // from the end of the fixed part of the object. - __ ldr(result, FieldMemOperand(object, offset + type->instance_size())); - } else { - // Non-negative property indices are in the properties array. - __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize)); - } - } else { - Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type)); - LoadHeapObject(result, Handle<HeapObject>::cast(function)); - } -} - - -void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { - Register object = ToRegister(instr->object()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - int map_count = instr->hydrogen()->types()->length(); - Handle<String> name = instr->hydrogen()->name(); - if (map_count == 0) { - ASSERT(instr->hydrogen()->need_generic()); - __ mov(r2, Operand(name)); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - } else { - Label done; - __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); - for (int i = 0; i < map_count - 1; ++i) { - Handle<Map> map = instr->hydrogen()->types()->at(i); - Label next; - __ cmp(scratch, Operand(map)); - __ b(ne, &next); - EmitLoadFieldOrConstantFunction(result, object, map, name); - __ b(&done); - __ bind(&next); - } - Handle<Map> map = instr->hydrogen()->types()->last(); - __ cmp(scratch, Operand(map)); - if (instr->hydrogen()->need_generic()) { - Label generic; - __ b(ne, &generic); - EmitLoadFieldOrConstantFunction(result, object, map, name); - __ b(&done); - __ bind(&generic); - __ mov(r2, Operand(name)); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - } else { - DeoptimizeIf(ne, instr->environment()); - EmitLoadFieldOrConstantFunction(result, object, map, name); - } - __ bind(&done); - } -} - - void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); // Name is always in r2. 
__ mov(r2, Operand(instr->name())); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2345,37 +2224,27 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset)); if (FLAG_debug_code) { - Label done, fail; + Label done; __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(scratch, ip); __ b(eq, &done); - __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); __ cmp(scratch, ip); __ b(eq, &done); - // |scratch| still contains |input|'s map. - __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); - __ ubfx(scratch, scratch, Map::kElementsKindShift, - Map::kElementsKindBitCount); - __ cmp(scratch, Operand(JSObject::FAST_ELEMENTS)); - __ b(eq, &done); - __ cmp(scratch, Operand(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); - __ b(lt, &fail); - __ cmp(scratch, Operand(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); - __ b(le, &done); - __ bind(&fail); - __ Abort("Check for fast or external elements failed."); + __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); + __ cmp(scratch, ip); + __ Check(eq, "Check for fast elements failed."); __ bind(&done); } } -void LCodeGen::DoLoadExternalArrayPointer( - LLoadExternalArrayPointer* instr) { +void LCodeGen::DoLoadPixelArrayExternalPointer( + LLoadPixelArrayExternalPointer* instr) { Register to_reg = ToRegister(instr->result()); Register from_reg = ToRegister(instr->InputAt(0)); - __ ldr(to_reg, FieldMemOperand(from_reg, - ExternalArray::kExternalPointerOffset)); + __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset)); } @@ -2402,90 +2271,26 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { Register key = EmitLoadRegister(instr->key(), scratch0()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); + ASSERT(result.is(elements)); // Load the result. __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr->environment()); - } + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + __ cmp(result, scratch); + DeoptimizeIf(eq, instr->environment()); } -void LCodeGen::DoLoadKeyedSpecializedArrayElement( - LLoadKeyedSpecializedArrayElement* instr) { - Register external_pointer = ToRegister(instr->external_pointer()); - Register key = no_reg; - JSObject::ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int shift_size = ElementsKindToShiftSize(elements_kind); - - if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS || - elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); - DwVfpRegister result(ToDoubleRegister(instr->result())); - Operand operand(key_is_constant ? 
Operand(constant_key * (1 << shift_size)) - : Operand(key, LSL, shift_size)); - __ add(scratch0(), external_pointer, operand); - if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), 0); - __ vcvt_f64_f32(result, result.low()); - } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), 0); - } - } else { - Register result(ToRegister(instr->result())); - MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: - __ ldrsb(result, mem_operand); - break; - case JSObject::EXTERNAL_PIXEL_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ ldrb(result, mem_operand); - break; - case JSObject::EXTERNAL_SHORT_ELEMENTS: - __ ldrsh(result, mem_operand); - break; - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ ldrh(result, mem_operand); - break; - case JSObject::EXTERNAL_INT_ELEMENTS: - __ ldr(result, mem_operand); - break; - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ ldr(result, mem_operand); - __ cmp(result, Operand(0x80000000)); - // TODO(danno): we could be more clever here, perhaps having a special - // version of the stub that detects if the overflow case actually - // happens, and generate code that returns a double rather than int. - DeoptimizeIf(cs, instr->environment()); - break; - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::FAST_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } +void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) { + Register external_elements = ToRegister(instr->external_pointer()); + Register key = ToRegister(instr->key()); + Register result = ToRegister(instr->result()); + + // Load the result. + __ ldrb(result, MemOperand(external_elements, key)); } @@ -2493,7 +2298,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r1)); ASSERT(ToRegister(instr->key()).is(r0)); - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2547,26 +2352,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { ASSERT(function.is(r1)); // Required by InvokeFunction. ASSERT(ToRegister(instr->result()).is(r0)); - // If the receiver is null or undefined, we have to pass the global - // object as a receiver to normal functions. Values have to be - // passed unchanged to builtins and strict-mode functions. + // If the receiver is null or undefined, we have to pass the global object + // as a receiver. Label global_object, receiver_ok; - - // Do not transform the receiver to object for strict mode - // functions. - __ ldr(scratch, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(scratch, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(scratch, - Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); - __ b(ne, &receiver_ok); - - // Do not transform the receiver to object for builtins. - __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &receiver_ok); - - // Normal function. Replace undefined or null with global receiver. 
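// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): the receiver rule in this hunk. The restored older code always
// substitutes the global receiver for a null or undefined receiver; the lines
// being removed additionally left the receiver untouched for strict-mode and
// native (builtin) functions.
enum class Receiver { kNull, kUndefined, kObject };

bool ShouldUseGlobalReceiver(Receiver r, bool is_strict, bool is_native) {
  if (is_strict || is_native) return false;  // the removed fast-outs
  return r == Receiver::kNull || r == Receiver::kUndefined;
}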
__ LoadRoot(scratch, Heap::kNullValueRootIndex); __ cmp(receiver, scratch); __ b(eq, &global_object); @@ -2577,14 +2365,12 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // Deoptimize if the receiver is not a JS object. __ tst(receiver, Operand(kSmiTagMask)); DeoptimizeIf(eq, instr->environment()); - __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); - DeoptimizeIf(lt, instr->environment()); + __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE); + DeoptimizeIf(lo, instr->environment()); __ jmp(&receiver_ok); __ bind(&global_object); __ ldr(receiver, GlobalObjectOperand()); - __ ldr(receiver, - FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); __ bind(&receiver_ok); // Copy the arguments to this function possibly from the @@ -2604,7 +2390,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // stack. Label invoke, loop; // length is a small non-negative integer, due to the test above. - __ cmp(length, Operand(0)); + __ tst(length, Operand(length)); __ b(eq, &invoke); __ bind(&loop); __ ldr(scratch, MemOperand(elements, length, LSL, 2)); @@ -2624,8 +2410,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // The number of arguments is stored in receiver which is r0, as expected // by InvokeFunction. v8::internal::ParameterCount actual(receiver); - __ InvokeFunction(function, actual, CALL_FUNCTION, - safepoint_generator, CALL_AS_METHOD); + __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -2641,12 +2426,6 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) { } -void LCodeGen::DoThisFunction(LThisFunction* instr) { - Register result = ToRegister(instr->result()); - __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -} - - void LCodeGen::DoContext(LContext* instr) { Register result = ToRegister(instr->result()); __ mov(result, cp); @@ -2657,7 +2436,8 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); __ ldr(result, - MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); + MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset)); } @@ -2677,11 +2457,10 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, int arity, - LInstruction* instr, - CallKind call_kind) { + LInstruction* instr) { // Change context if needed. bool change_context = - (info()->closure()->context() != function->context()) || + (graph()->info()->closure()->context() != function->context()) || scope()->contains_with() || (scope()->num_heap_slots() > 0); if (change_context) { @@ -2698,7 +2477,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, RecordPosition(pointers->position()); // Invoke function. 
- __ SetCallKind(r5, call_kind); __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); __ Call(ip); @@ -2713,16 +2491,13 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); __ mov(r1, Operand(instr->function())); - CallKnownFunction(instr->function(), - instr->arity(), - instr, - CALL_AS_METHOD); + CallKnownFunction(instr->function(), instr->arity(), instr); } void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { + ASSERT(instr->InputAt(0)->Equals(instr->result())); Register input = ToRegister(instr->InputAt(0)); - Register result = ToRegister(instr->result()); Register scratch = scratch0(); // Deoptimize if not a heap number. @@ -2736,10 +2511,10 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { scratch = no_reg; __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); // Check the sign of the argument. If the argument is positive, just - // return it. + // return it. We do not need to patch the stack since |input| and + // |result| are the same register and |input| would be restored + // unchanged by popping safepoint registers. __ tst(exponent, Operand(HeapNumber::kSignMask)); - // Move the input to the result if necessary. - __ Move(result, input); __ b(eq, &done); // Input is negative. Reverse its sign. @@ -2779,7 +2554,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - __ StoreToSafepointRegisterSlot(tmp1, result); + __ StoreToSafepointRegisterSlot(tmp1, input); } __ bind(&done); @@ -2788,13 +2563,11 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { Register input = ToRegister(instr->InputAt(0)); - Register result = ToRegister(instr->result()); __ cmp(input, Operand(0)); - __ Move(result, input, pl); // We can make rsb conditional because the previous cmp instruction // will clear the V (overflow) flag and rsb won't set this flag // if input is positive. - __ rsb(result, input, Operand(0), SetCC, mi); + __ rsb(input, input, Operand(0), SetCC, mi); // Deoptimize on overflow. DeoptimizeIf(vs, instr->environment()); } @@ -2814,11 +2587,11 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { LUnaryMathOperation* instr_; }; + ASSERT(instr->InputAt(0)->Equals(instr->result())); Representation r = instr->hydrogen()->value()->representation(); if (r.IsDouble()) { DwVfpRegister input = ToDoubleRegister(instr->InputAt(0)); - DwVfpRegister result = ToDoubleRegister(instr->result()); - __ vabs(result, input); + __ vabs(input, input); } else if (r.IsInteger32()) { EmitIntegerMathAbs(instr); } else { @@ -2852,65 +2625,23 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { // Move the result back to general purpose register r0. __ vmov(result, single_scratch); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - Label done; - __ cmp(result, Operand(0)); - __ b(ne, &done); - __ vmov(scratch1, input.high()); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr->environment()); - __ bind(&done); - } + // Test for -0. 
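// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): the minus-zero test done below. After truncation the integer result
// is 0 for both +0 and -0, so the code re-reads the sign bit of the original
// double (vmov from input.high() plus tst against HeapNumber::kSignMask) and
// deoptimizes when it is set.
#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  if (value != 0.0) return false;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return (bits >> 63) != 0;  // IEEE-754 sign bit
}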
+ Label done; + __ cmp(result, Operand(0)); + __ b(ne, &done); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); } void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); - Register scratch1 = result; - Register scratch2 = scratch0(); - Label done, check_sign_on_zero; - - // Extract exponent bits. - __ vmov(scratch1, input.high()); - __ ubfx(scratch2, - scratch1, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // If the number is in ]-0.5, +0.5[, the result is +/- 0. - __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2)); - __ mov(result, Operand(0), LeaveCC, le); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ b(le, &check_sign_on_zero); - } else { - __ b(le, &done); - } - - // The following conversion will not work with numbers - // outside of ]-2^32, 2^32[. - __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32)); - DeoptimizeIf(ge, instr->environment()); - - // Save the original sign for later comparison. - __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask)); - - __ Vmov(double_scratch0(), 0.5); - __ vadd(input, input, double_scratch0()); - - // Check sign of the result: if the sign changed, the input - // value was in ]0.5, 0[ and the result should be -0. - __ vmov(scratch1, input.high()); - __ eor(scratch1, scratch1, Operand(scratch2), SetCC); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(mi, instr->environment()); - } else { - __ mov(result, Operand(0), LeaveCC, mi); - __ b(mi, &done); - } - - __ EmitVFPTruncate(kRoundToMinusInf, + Register scratch1 = scratch0(); + Register scratch2 = result; + __ EmitVFPTruncate(kRoundToNearest, double_scratch0().low(), input, scratch1, @@ -2918,32 +2649,21 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { DeoptimizeIf(ne, instr->environment()); __ vmov(result, double_scratch0().low()); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - __ cmp(result, Operand(0)); - __ b(ne, &done); - __ bind(&check_sign_on_zero); - __ vmov(scratch1, input.high()); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr->environment()); - } + // Test for -0. + Label done; + __ cmp(result, Operand(0)); + __ b(ne, &done); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); __ bind(&done); } void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); - DoubleRegister result = ToDoubleRegister(instr->result()); - __ vsqrt(result, input); -} - - -void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); - DoubleRegister result = ToDoubleRegister(instr->result()); - // Add +0 to convert -0 to +0. - __ vadd(result, input, kDoubleRegZero); - __ vsqrt(result, result); + ASSERT(ToDoubleRegister(instr->result()).is(input)); + __ vsqrt(input, input); } @@ -2955,18 +2675,17 @@ void LCodeGen::DoPower(LPower* instr) { Representation exponent_type = instr->hydrogen()->right()->representation(); if (exponent_type.IsDouble()) { // Prepare arguments and call C function. 
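// Illustrative sketch (standalone C++, not part of the V8 sources in this
// diff): what the vmov(r0, r1, dreg) pairs below accomplish, assuming the
// soft-float calling convention and little-endian word order used by this
// port -- a double argument is handed to a C function as its raw bit pattern
// split across a core register pair.
#include <cstdint>
#include <cstring>

void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  *lo = static_cast<uint32_t>(bits);        // lower register of the pair (r0/r2)
  *hi = static_cast<uint32_t>(bits >> 32);  // upper register of the pair (r1/r3)
}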
- __ PrepareCallCFunction(0, 2, scratch); - __ SetCallCDoubleArguments(ToDoubleRegister(left), - ToDoubleRegister(right)); - __ CallCFunction( - ExternalReference::power_double_double_function(isolate()), 0, 2); + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, ToDoubleRegister(right)); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); } else if (exponent_type.IsInteger32()) { ASSERT(ToRegister(right).is(r0)); // Prepare arguments and call C function. - __ PrepareCallCFunction(1, 1, scratch); - __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right)); - __ CallCFunction( - ExternalReference::power_double_int_function(isolate()), 1, 1); + __ PrepareCallCFunction(4, scratch); + __ mov(r2, ToRegister(right)); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ CallCFunction(ExternalReference::power_double_int_function(), 4); } else { ASSERT(exponent_type.IsTagged()); ASSERT(instr->hydrogen()->left()->representation().IsDouble()); @@ -2996,40 +2715,16 @@ void LCodeGen::DoPower(LPower* instr) { // Prepare arguments and call C function. __ bind(&call); - __ PrepareCallCFunction(0, 2, scratch); - __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg); - __ CallCFunction( - ExternalReference::power_double_double_function(isolate()), 0, 2); + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, result_reg); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); } // Store the result in the result register. __ GetCFunctionDoubleResult(result_reg); } -void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { - ASSERT(ToDoubleRegister(instr->result()).is(d2)); - TranscendentalCacheStub stub(TranscendentalCache::LOG, - TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { - ASSERT(ToDoubleRegister(instr->result()).is(d2)); - TranscendentalCacheStub stub(TranscendentalCache::COS, - TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - -void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { - ASSERT(ToDoubleRegister(instr->result()).is(d2)); - TranscendentalCacheStub stub(TranscendentalCache::SIN, - TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { switch (instr->op()) { case kMathAbs: @@ -3044,18 +2739,6 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { case kMathSqrt: DoMathSqrt(instr); break; - case kMathPowHalf: - DoMathPowHalf(instr); - break; - case kMathCos: - DoMathCos(instr); - break; - case kMathSin: - DoMathSin(instr); - break; - case kMathLog: - DoMathLog(instr); - break; default: Abort("Unimplemented type of LUnaryMathOperation."); UNREACHABLE(); @@ -3063,27 +2746,11 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { } -void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { - ASSERT(ToRegister(instr->function()).is(r1)); - ASSERT(instr->HasPointerMap()); - ASSERT(instr->HasDeoptimizationEnvironment()); - LPointerMap* pointers = instr->pointer_map(); - LEnvironment* env = instr->deoptimization_environment(); - RecordPosition(pointers->position()); - RegisterEnvironmentForDeoptimization(env); - SafepointGenerator generator(this, pointers, env->deoptimization_index()); - ParameterCount count(instr->arity()); - __ InvokeFunction(r1, count, 
CALL_FUNCTION, generator, CALL_AS_METHOD); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); -} - - void LCodeGen::DoCallKeyed(LCallKeyed* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - Handle<Code> ic = - isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP); + Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP); CallCode(ic, RelocInfo::CODE_TARGET, instr); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3093,11 +2760,9 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - RelocInfo::Mode mode = RelocInfo::CODE_TARGET; - Handle<Code> ic = - isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode); + Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP); __ mov(r2, Operand(instr->name())); - CallCode(ic, mode, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3107,7 +2772,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -3118,11 +2783,9 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; - Handle<Code> ic = - isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode); + Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP); __ mov(r2, Operand(instr->name())); - CallCode(ic, mode, instr); + CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3130,7 +2793,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(r0)); __ mov(r1, Operand(instr->target())); - CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION); + CallKnownFunction(instr->target(), instr->arity(), instr); } @@ -3138,7 +2801,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->InputAt(0)).is(r1)); ASSERT(ToRegister(instr->result()).is(r0)); - Handle<Code> builtin = isolate()->builtins()->JSConstructCall(); + Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall)); __ mov(r0, Operand(instr->arity())); CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr); } @@ -3187,9 +2850,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { // Name is always in r2. __ mov(r2, Operand(instr->name())); - Handle<Code> ic = instr->strict_mode() - ? isolate()->builtins()->StoreIC_Initialize_Strict() - : isolate()->builtins()->StoreIC_Initialize(); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? 
Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -3226,89 +2889,18 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { } -void LCodeGen::DoStoreKeyedSpecializedArrayElement( - LStoreKeyedSpecializedArrayElement* instr) { - - Register external_pointer = ToRegister(instr->external_pointer()); - Register key = no_reg; - JSObject::ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int shift_size = ElementsKindToShiftSize(elements_kind); - - if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS || - elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); - DwVfpRegister value(ToDoubleRegister(instr->value())); - Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size)) - : Operand(key, LSL, shift_size)); - __ add(scratch0(), external_pointer, operand); - if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { - __ vcvt_f32_f64(double_scratch0().low(), value); - __ vstr(double_scratch0().low(), scratch0(), 0); - } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS - __ vstr(value, scratch0(), 0); - } - } else { - Register value(ToRegister(instr->value())); - MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); - switch (elements_kind) { - case JSObject::EXTERNAL_PIXEL_ELEMENTS: - case JSObject::EXTERNAL_BYTE_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(value, mem_operand); - break; - case JSObject::EXTERNAL_SHORT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(value, mem_operand); - break; - case JSObject::EXTERNAL_INT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(value, mem_operand); - break; - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::FAST_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r2)); ASSERT(ToRegister(instr->key()).is(r1)); ASSERT(ToRegister(instr->value()).is(r0)); - Handle<Code> ic = instr->strict_mode() - ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() - : isolate()->builtins()->KeyedStoreIC_Initialize(); + Handle<Code> ic(Builtins::builtin( + info_->is_strict() ? 
Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); CallCode(ic, RelocInfo::CODE_TARGET, instr); } -void LCodeGen::DoStringAdd(LStringAdd* instr) { - __ push(ToRegister(instr->left())); - __ push(ToRegister(instr->right())); - StringAddStub stub(NO_STRING_CHECK_IN_STUB); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); -} - - void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { class DeferredStringCharCodeAt: public LDeferredCode { public: @@ -3447,53 +3039,6 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { } -void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { - class DeferredStringCharFromCode: public LDeferredCode { - public: - DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } - private: - LStringCharFromCode* instr_; - }; - - DeferredStringCharFromCode* deferred = - new DeferredStringCharFromCode(this, instr); - - ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - ASSERT(!char_code.is(result)); - - __ cmp(char_code, Operand(String::kMaxAsciiCharCode)); - __ b(hi, deferred->entry()); - __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); - __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); - __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(result, ip); - __ b(eq, deferred->entry()); - __ bind(deferred->exit()); -} - - -void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { - Register char_code = ToRegister(instr->char_code()); - Register result = ToRegister(instr->result()); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ mov(result, Operand(0)); - - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - __ SmiTag(char_code); - __ push(char_code); - CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); - __ StoreToSafepointRegisterSlot(r0, result); -} - - void LCodeGen::DoStringLength(LStringLength* instr) { Register string = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); @@ -3542,8 +3087,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { Label slow; Register reg = ToRegister(instr->InputAt(0)); - DoubleRegister dbl_scratch = double_scratch0(); - SwVfpRegister flt_scratch = dbl_scratch.low(); + DoubleRegister dbl_scratch = d0; + SwVfpRegister flt_scratch = s0; // Preserve the value of all registers. PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); @@ -3637,52 +3182,44 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister() && input->Equals(instr->result())); if (instr->needs_check()) { - ASSERT(kHeapObjectTag == 1); - // If the input is a HeapObject, SmiUntag will set the carry flag. 
- __ SmiUntag(ToRegister(input), SetCC); - DeoptimizeIf(cs, instr->environment()); - } else { - __ SmiUntag(ToRegister(input)); + __ tst(ToRegister(input), Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); } + __ SmiUntag(ToRegister(input)); } void LCodeGen::EmitNumberUntagD(Register input_reg, DoubleRegister result_reg, - bool deoptimize_on_undefined, LEnvironment* env) { Register scratch = scratch0(); - SwVfpRegister flt_scratch = double_scratch0().low(); - ASSERT(!result_reg.is(double_scratch0())); + SwVfpRegister flt_scratch = s0; + ASSERT(!result_reg.is(d0)); Label load_smi, heap_number, done; // Smi check. - __ JumpIfSmi(input_reg, &load_smi); + __ tst(input_reg, Operand(kSmiTagMask)); + __ b(eq, &load_smi); // Heap number map check. __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(scratch, Operand(ip)); - if (deoptimize_on_undefined) { - DeoptimizeIf(ne, env); - } else { - Label heap_number; - __ b(eq, &heap_number); + __ b(eq, &heap_number); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input_reg, Operand(ip)); - DeoptimizeIf(ne, env); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(input_reg, Operand(ip)); + DeoptimizeIf(ne, env); - // Convert undefined to NaN. - __ LoadRoot(ip, Heap::kNanValueRootIndex); - __ sub(ip, ip, Operand(kHeapObjectTag)); - __ vldr(result_reg, ip, HeapNumber::kValueOffset); - __ jmp(&done); + // Convert undefined to NaN. + __ LoadRoot(ip, Heap::kNanValueRootIndex); + __ sub(ip, ip, Operand(kHeapObjectTag)); + __ vldr(result_reg, ip, HeapNumber::kValueOffset); + __ jmp(&done); - __ bind(&heap_number); - } // Heap number to double register conversion. + __ bind(&heap_number); __ sub(ip, input_reg, Operand(kHeapObjectTag)); __ vldr(result_reg, ip, HeapNumber::kValueOffset); __ jmp(&done); @@ -3708,36 +3245,19 @@ class DeferredTaggedToI: public LDeferredCode { void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->InputAt(0)); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); - DwVfpRegister double_scratch = double_scratch0(); - SwVfpRegister single_scratch = double_scratch.low(); - - ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); - ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); - Label done; - - // The input was optimistically untagged; revert it. - // The carry flag is set when we reach this deferred code as we just executed - // SmiUntag(heap_object, SetCC) - ASSERT(kHeapObjectTag == 1); - __ adc(input_reg, input_reg, Operand(input_reg)); + Register input_reg = ToRegister(instr->InputAt(0)); + Register scratch = scratch0(); + DoubleRegister dbl_scratch = d0; + SwVfpRegister flt_scratch = s0; + DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0)); // Heap number map check. - __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); + __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch1, Operand(ip)); + __ cmp(scratch, Operand(ip)); if (instr->truncating()) { - Register scratch3 = ToRegister(instr->TempAt(1)); - DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2)); - ASSERT(!scratch3.is(input_reg) && - !scratch3.is(scratch1) && - !scratch3.is(scratch2)); - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. Label heap_number; __ b(eq, &heap_number); // Check for undefined. 
Undefined is converted to zero for truncating @@ -3749,38 +3269,36 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ b(&done); __ bind(&heap_number); - __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); - __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); - - __ EmitECMATruncate(input_reg, - double_scratch2, - single_scratch, - scratch1, - scratch2, - scratch3); + __ sub(ip, input_reg, Operand(kHeapObjectTag)); + __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset); + __ vcmp(dbl_tmp, 0.0); // Sets overflow bit in FPSCR flags if NaN. + __ vcvt_s32_f64(flt_scratch, dbl_tmp); + __ vmov(input_reg, flt_scratch); // 32-bit result of conversion. + __ vmrs(pc); // Move vector status bits to normal status bits. + // Overflow bit is set if dbl_tmp is Nan. + __ cmn(input_reg, Operand(1), vc); // 0x7fffffff + 1 -> overflow. + __ cmp(input_reg, Operand(1), vc); // 0x80000000 - 1 -> overflow. + DeoptimizeIf(vs, instr->environment()); // Saturation may have occured. } else { - CpuFeatures::Scope scope(VFP3); // Deoptimize if we don't have a heap number. DeoptimizeIf(ne, instr->environment()); __ sub(ip, input_reg, Operand(kHeapObjectTag)); - __ vldr(double_scratch, ip, HeapNumber::kValueOffset); - __ EmitVFPTruncate(kRoundToZero, - single_scratch, - double_scratch, - scratch1, - scratch2, - kCheckForInexactConversion); - DeoptimizeIf(ne, instr->environment()); - // Load the result. - __ vmov(input_reg, single_scratch); - + __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset); + __ vcvt_s32_f64(flt_scratch, dbl_tmp); + __ vmov(input_reg, flt_scratch); // 32-bit result of conversion. + // Non-truncating conversion means that we cannot lose bits, so we convert + // back to check; note that using non-overlapping s and d regs would be + // slightly faster. + __ vcvt_f64_s32(dbl_scratch, flt_scratch); + __ VFPCompareAndSetFlags(dbl_scratch, dbl_tmp); + DeoptimizeIf(ne, instr->environment()); // Not equal or unordered. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ cmp(input_reg, Operand(0)); + __ tst(input_reg, Operand(input_reg)); __ b(ne, &done); - __ vmov(scratch1, double_scratch.high()); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); + __ vmov(lr, ip, dbl_tmp); + __ tst(ip, Operand(1 << 31)); // Test sign bit. DeoptimizeIf(ne, instr->environment()); } } @@ -3797,12 +3315,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); - // Optimistically untag the input. - // If the input is a HeapObject, SmiUntag will set the carry flag. - __ SmiUntag(input_reg, SetCC); - // Branch to deferred code if the input was tagged. - // The deferred code will take care of restoring the tag. - __ b(cs, deferred->entry()); + // Smi check. + __ tst(input_reg, Operand(kSmiTagMask)); + __ b(ne, deferred->entry()); + + // Smi to int32 conversion + __ SmiUntag(input_reg); // Untag smi. 
+ __ bind(deferred->exit()); } @@ -3816,100 +3335,83 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { Register input_reg = ToRegister(input); DoubleRegister result_reg = ToDoubleRegister(result); - EmitNumberUntagD(input_reg, result_reg, - instr->hydrogen()->deoptimize_on_undefined(), - instr->environment()); + EmitNumberUntagD(input_reg, result_reg, instr->environment()); } void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Register result_reg = ToRegister(instr->result()); + LOperand* input = instr->InputAt(0); + ASSERT(input->IsDoubleRegister()); + LOperand* result = instr->result(); + ASSERT(result->IsRegister()); + + DoubleRegister double_input = ToDoubleRegister(input); + Register result_reg = ToRegister(result); + SwVfpRegister single_scratch = double_scratch0().low(); Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->TempAt(0)); - DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0)); - DwVfpRegister double_scratch = double_scratch0(); - SwVfpRegister single_scratch = double_scratch0().low(); - Label done; + __ EmitVFPTruncate(kRoundToZero, + single_scratch, + double_input, + scratch1, + scratch2); - if (instr->truncating()) { - Register scratch3 = ToRegister(instr->TempAt(1)); - __ EmitECMATruncate(result_reg, - double_input, - single_scratch, - scratch1, - scratch2, - scratch3); - } else { - VFPRoundingMode rounding_mode = kRoundToMinusInf; - __ EmitVFPTruncate(rounding_mode, - single_scratch, - double_input, - scratch1, - scratch2, - kCheckForInexactConversion); - // Deoptimize if we had a vfp invalid exception, - // including inexact operation. + // Deoptimize if we had a vfp invalid exception. + DeoptimizeIf(ne, instr->environment()); + + // Retrieve the result. + __ vmov(result_reg, single_scratch); + + if (!instr->truncating()) { + // Convert result back to double and compare with input + // to check if the conversion was exact. + __ vmov(single_scratch, result_reg); + __ vcvt_f64_s32(double_scratch0(), single_scratch); + __ VFPCompareAndSetFlags(double_scratch0(), double_input); DeoptimizeIf(ne, instr->environment()); - // Retrieve the result. - __ vmov(result_reg, single_scratch); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ cmp(result_reg, Operand(0)); + __ b(ne, &done); + // Check for -0. 
+ __ vmov(scratch1, double_input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + + __ bind(&done); + } } - __ bind(&done); } void LCodeGen::DoCheckSmi(LCheckSmi* instr) { LOperand* input = instr->InputAt(0); + ASSERT(input->IsRegister()); __ tst(ToRegister(input), Operand(kSmiTagMask)); - DeoptimizeIf(ne, instr->environment()); -} - - -void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - LOperand* input = instr->InputAt(0); - __ tst(ToRegister(input), Operand(kSmiTagMask)); - DeoptimizeIf(eq, instr->environment()); + DeoptimizeIf(instr->condition(), instr->environment()); } void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { Register input = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); + InstanceType first = instr->hydrogen()->first(); + InstanceType last = instr->hydrogen()->last(); __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(first)); - if (instr->hydrogen()->is_interval_check()) { - InstanceType first; - InstanceType last; - instr->hydrogen()->GetCheckInterval(&first, &last); - - __ cmp(scratch, Operand(first)); - - // If there is only one type in the interval check for equality. - if (first == last) { - DeoptimizeIf(ne, instr->environment()); - } else { - DeoptimizeIf(lo, instr->environment()); - // Omit check for the last type. - if (last != LAST_TYPE) { - __ cmp(scratch, Operand(last)); - DeoptimizeIf(hi, instr->environment()); - } - } + // If there is only one type in the interval check for equality. + if (first == last) { + DeoptimizeIf(ne, instr->environment()); } else { - uint8_t mask; - uint8_t tag; - instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); - - if (IsPowerOf2(mask)) { - ASSERT(tag == 0 || IsPowerOf2(tag)); - __ tst(scratch, Operand(mask)); - DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); - } else { - __ and_(scratch, scratch, Operand(mask)); - __ cmp(scratch, Operand(tag)); - DeoptimizeIf(ne, instr->environment()); + DeoptimizeIf(lo, instr->environment()); + // Omit check for the last type. + if (last != LAST_TYPE) { + __ cmp(scratch, Operand(last)); + DeoptimizeIf(hi, instr->environment()); } } } @@ -3934,64 +3436,11 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) { } -void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); - __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); -} - - -void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { - Register unclamped_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - __ ClampUint8(result_reg, unclamped_reg); -} - - -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { - Register scratch = scratch0(); - Register input_reg = ToRegister(instr->unclamped()); - Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); - Label is_smi, done, heap_number; - - // Both smi and heap number cases are handled. - __ JumpIfSmi(input_reg, &is_smi); - - // Check for heap number - __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ cmp(scratch, Operand(factory()->heap_number_map())); - __ b(eq, &heap_number); - - // Check for undefined. Undefined is converted to zero for clamping - // conversions. 
- __ cmp(input_reg, Operand(factory()->undefined_value())); - DeoptimizeIf(ne, instr->environment()); - __ movt(input_reg, 0); - __ jmp(&done); - - // Heap number - __ bind(&heap_number); - __ vldr(double_scratch0(), FieldMemOperand(input_reg, - HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); - __ jmp(&done); - - // smi - __ bind(&is_smi); - __ SmiUntag(result_reg, input_reg); - __ ClampUint8(result_reg, result_reg); - - __ bind(&done); -} - - void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) { - if (heap()->InNewSpace(*object)) { + if (Heap::InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = - factory()->NewJSGlobalPropertyCell(object); + Factory::NewJSGlobalPropertyCell(object); __ mov(result, Operand(cell)); __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset)); } else { @@ -4073,13 +3522,6 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { } -void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - ASSERT(ToRegister(instr->InputAt(0)).is(r0)); - __ push(r0); - CallRuntime(Runtime::kToFastProperties, 1, instr); -} - - void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { Label materialized; // Registers will be used as follows: @@ -4140,17 +3582,16 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { // space for nested functions that don't need literals cloning. Handle<SharedFunctionInfo> shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); - if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub( - shared_info->strict_mode() ? kStrictMode : kNonStrictMode); + if (shared_info->num_literals() == 0 && !pretenure) { + FastNewClosureStub stub; __ mov(r1, Operand(shared_info)); __ push(r1); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { __ mov(r2, Operand(shared_info)); __ mov(r1, Operand(pretenure - ? factory()->true_value() - : factory()->false_value())); + ? 
Factory::true_value() + : Factory::false_value())); __ Push(cp, r2, r1); CallRuntime(Runtime::kNewClosure, 3, instr); } @@ -4164,6 +3605,29 @@ void LCodeGen::DoTypeof(LTypeof* instr) { } +void LCodeGen::DoTypeofIs(LTypeofIs* instr) { + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Label true_label; + Label false_label; + Label done; + + Condition final_branch_condition = EmitTypeofIs(&true_label, + &false_label, + input, + instr->type_literal()); + __ b(final_branch_condition, &true_label); + __ bind(&false_label); + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ b(&done); + + __ bind(&true_label); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + + __ bind(&done); +} + + void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { Register input = ToRegister(instr->InputAt(0)); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -4186,56 +3650,71 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Handle<String> type_name) { Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); - if (type_name->Equals(heap()->number_symbol())) { - __ JumpIfSmi(input, true_label); + if (type_name->Equals(Heap::number_symbol())) { + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, true_label); __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(input, Operand(ip)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->string_symbol())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE); - __ b(ge, false_label); + } else if (type_name->Equals(Heap::string_symbol())) { + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, false_label); + __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); - final_branch_condition = eq; + __ b(ne, false_label); + __ CompareInstanceType(input, scratch, FIRST_NONSTRING_TYPE); + final_branch_condition = lo; - } else if (type_name->Equals(heap()->boolean_symbol())) { - __ CompareRoot(input, Heap::kTrueValueRootIndex); + } else if (type_name->Equals(Heap::boolean_symbol())) { + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(input, ip); __ b(eq, true_label); - __ CompareRoot(input, Heap::kFalseValueRootIndex); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(input, ip); final_branch_condition = eq; - } else if (type_name->Equals(heap()->undefined_symbol())) { - __ CompareRoot(input, Heap::kUndefinedValueRootIndex); + } else if (type_name->Equals(Heap::undefined_symbol())) { + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(input, ip); __ b(eq, true_label); - __ JumpIfSmi(input, false_label); + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, false_label); // Check for undetectable objects => true. 
__ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); final_branch_condition = ne; - } else if (type_name->Equals(heap()->function_symbol())) { - __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, input, scratch, - FIRST_CALLABLE_SPEC_OBJECT_TYPE); - final_branch_condition = ge; + } else if (type_name->Equals(Heap::function_symbol())) { + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, false_label); + __ CompareObjectType(input, input, scratch, JS_FUNCTION_TYPE); + __ b(eq, true_label); + // Regular expressions => 'function' (they are callable). + __ CompareInstanceType(input, scratch, JS_REGEXP_TYPE); + final_branch_condition = eq; - } else if (type_name->Equals(heap()->object_symbol())) { - __ JumpIfSmi(input, false_label); - __ CompareRoot(input, Heap::kNullValueRootIndex); + } else if (type_name->Equals(Heap::object_symbol())) { + __ tst(input, Operand(kSmiTagMask)); + __ b(eq, false_label); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(input, ip); __ b(eq, true_label); - __ CompareObjectType(input, input, scratch, - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); - __ b(lt, false_label); - __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); - __ b(gt, false_label); + // Regular expressions => 'function', not 'object'. + __ CompareObjectType(input, input, scratch, JS_REGEXP_TYPE); + __ b(eq, false_label); // Check for undetectable objects => false. __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); - final_branch_condition = eq; + __ b(ne, false_label); + // Check for JS objects => true. + __ CompareInstanceType(input, scratch, FIRST_JS_OBJECT_TYPE); + __ b(lo, false_label); + __ CompareInstanceType(input, scratch, LAST_JS_OBJECT_TYPE); + final_branch_condition = ls; } else { final_branch_condition = ne; @@ -4247,6 +3726,26 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, } +void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) { + Register result = ToRegister(instr->result()); + Label true_label; + Label false_label; + Label done; + + EmitIsConstructCall(result, scratch0()); + __ b(eq, &true_label); + + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ b(&done); + + + __ bind(&true_label); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + + __ bind(&done); +} + + void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { Register temp1 = ToRegister(instr->TempAt(0)); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -4301,62 +3800,19 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { SafepointGenerator safepoint_generator(this, pointers, env->deoptimization_index()); - __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator); -} - - -void LCodeGen::DoIn(LIn* instr) { - Register obj = ToRegister(instr->object()); - Register key = ToRegister(instr->key()); - __ Push(key, obj); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); - LPointerMap* pointers = instr->pointer_map(); - LEnvironment* env = instr->deoptimization_environment(); - RecordPosition(pointers->position()); - RegisterEnvironmentForDeoptimization(env); - SafepointGenerator safepoint_generator(this, - pointers, - env->deoptimization_index()); - __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator); -} - - -void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { - PushSafepointRegistersScope scope(this, 
Safepoint::kWithRegisters); - CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr); + __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator); } void LCodeGen::DoStackCheck(LStackCheck* instr) { - class DeferredStackCheck: public LDeferredCode { - public: - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } - private: - LStackCheck* instr_; - }; - - if (instr->hydrogen()->is_function_entry()) { - // Perform stack overflow check. - Label done; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &done); - StackCheckStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); - __ bind(&done); - } else { - ASSERT(instr->hydrogen()->is_backwards_branch()); - // Perform stack overflow check if this goto needs it before jumping. - DeferredStackCheck* deferred_stack_check = - new DeferredStackCheck(this, instr); - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(lo, deferred_stack_check->entry()); - __ bind(instr->done_label()); - deferred_stack_check->SetExit(instr->done_label()); - } + // Perform stack overflow check. + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + StackCheckStub stub; + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ bind(&ok); } @@ -4377,8 +3833,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) { } - - #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index ead8489034..393b6423e3 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -51,10 +51,9 @@ class LCodeGen BASE_EMBEDDED { current_instruction_(-1), instructions_(chunk->instructions()), deoptimizations_(4), - deopt_jump_table_(4), deoptimization_literals_(8), inlined_function_count_(0), - scope_(info->scope()), + scope_(chunk->graph()->info()->scope()), status_(UNUSED), deferred_(8), osr_pc_offset_(-1), @@ -66,10 +65,6 @@ class LCodeGen BASE_EMBEDDED { // Simple accessors. MacroAssembler* masm() const { return masm_; } - CompilationInfo* info() const { return info_; } - Isolate* isolate() const { return info_->isolate(); } - Factory* factory() const { return isolate()->factory(); } - Heap* heap() const { return isolate()->heap(); } // Support for converting LOperands to assembler types. // LOperand must be a register. @@ -108,15 +103,13 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredNumberTagI(LNumberTagI* instr); void DoDeferredTaggedToI(LTaggedToI* instr); void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr); - void DoDeferredStackCheck(LStackCheck* instr); + void DoDeferredStackCheck(LGoto* instr); void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); - void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); // Parallel move support. void DoParallelMove(LParallelMove* move); - void DoGap(LGap* instr); // Emit frame translation commands for an environment. void WriteTranslation(LEnvironment* environment, Translation* translation); @@ -140,7 +133,7 @@ class LCodeGen BASE_EMBEDDED { bool is_aborted() const { return status_ == ABORTED; } int strict_mode_flag() const { - return info()->is_strict_mode() ? kStrictMode : kNonStrictMode; + return info_->is_strict() ? 
kStrictMode : kNonStrictMode; } LChunk* chunk() const { return chunk_; } @@ -148,7 +141,7 @@ class LCodeGen BASE_EMBEDDED { HGraph* graph() const { return chunk_->graph(); } Register scratch0() { return r9; } - DwVfpRegister double_scratch0() { return d15; } + DwVfpRegister double_scratch0() { return d0; } int GetNextEmittedBlock(int block); LInstruction* GetNextInstruction(); @@ -160,8 +153,8 @@ class LCodeGen BASE_EMBEDDED { Register temporary, Register temporary2); - int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - int GetParameterCount() const { return scope()->num_parameters(); } + int StackSlotCount() const { return chunk()->spill_slot_count(); } + int ParameterCount() const { return scope()->num_parameters(); } void Abort(const char* format, ...); void Comment(const char* format, ...); @@ -173,7 +166,6 @@ class LCodeGen BASE_EMBEDDED { bool GeneratePrologue(); bool GenerateBody(); bool GenerateDeferredCode(); - bool GenerateDeoptJumpTable(); bool GenerateSafepointTable(); enum SafepointMode { @@ -190,14 +182,14 @@ class LCodeGen BASE_EMBEDDED { LInstruction* instr, SafepointMode safepoint_mode); - void CallRuntime(const Runtime::Function* function, + void CallRuntime(Runtime::Function* function, int num_arguments, LInstruction* instr); void CallRuntime(Runtime::FunctionId id, int num_arguments, LInstruction* instr) { - const Runtime::Function* function = Runtime::FunctionForId(id); + Runtime::Function* function = Runtime::FunctionForId(id); CallRuntime(function, num_arguments, instr); } @@ -209,8 +201,7 @@ class LCodeGen BASE_EMBEDDED { // to be in edi. void CallKnownFunction(Handle<JSFunction> function, int arity, - LInstruction* instr, - CallKind call_kind); + LInstruction* instr); void LoadHeapObject(Register result, Handle<HeapObject> object); @@ -237,10 +228,6 @@ class LCodeGen BASE_EMBEDDED { void DoMathFloor(LUnaryMathOperation* instr); void DoMathRound(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); - void DoMathPowHalf(LUnaryMathOperation* instr); - void DoMathLog(LUnaryMathOperation* instr); - void DoMathCos(LUnaryMathOperation* instr); - void DoMathSin(LUnaryMathOperation* instr); // Support for recording safepoint and position information. void RecordSafepoint(LPointerMap* pointers, @@ -256,17 +243,13 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordPosition(int position); - int LastSafepointEnd() { - return static_cast<int>(safepoints_.GetPcAfterGap()); - } static Condition TokenToCondition(Token::Value op, bool is_unsigned); - void EmitGoto(int block); + void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL); void EmitBranch(int left_block, int right_block, Condition cc); void EmitCmpI(LOperand* left, LOperand* right); void EmitNumberUntagD(Register input, DoubleRegister result, - bool deoptimize_on_undefined, LEnvironment* env); // Emits optimized code for typeof x == "y". Modifies input register. @@ -280,6 +263,7 @@ class LCodeGen BASE_EMBEDDED { // true and false label should be made, to optimize fallthrough. Condition EmitIsObject(Register input, Register temp1, + Register temp2, Label* is_not_object, Label* is_object); @@ -287,19 +271,6 @@ class LCodeGen BASE_EMBEDDED { // Caller should branch on equal condition. 
void EmitIsConstructCall(Register temp1, Register temp2); - void EmitLoadFieldOrConstantFunction(Register result, - Register object, - Handle<Map> type, - Handle<String> name); - - struct JumpTableEntry { - explicit inline JumpTableEntry(Address entry) - : label(), - address(entry) { } - Label label; - Address address; - }; - LChunk* const chunk_; MacroAssembler* const masm_; CompilationInfo* const info_; @@ -308,7 +279,6 @@ class LCodeGen BASE_EMBEDDED { int current_instruction_; const ZoneList<LInstruction*>* instructions_; ZoneList<LEnvironment*> deoptimizations_; - ZoneList<JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; int inlined_function_count_; Scope* const scope_; diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index 02608a6950..1a2326b748 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -25,8 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" - #include "arm/lithium-gap-resolver-arm.h" #include "arm/lithium-codegen-arm.h" diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 08a1cb9453..9340b61dd8 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -32,21 +32,18 @@ #if defined(V8_TARGET_ARCH_ARM) #include "bootstrapper.h" -#include "codegen.h" +#include "codegen-inl.h" #include "debug.h" #include "runtime.h" namespace v8 { namespace internal { -MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) - : Assembler(arg_isolate, buffer, size), +MacroAssembler::MacroAssembler(void* buffer, int size) + : Assembler(buffer, size), generating_stub_(false), - allow_stub_calls_(true) { - if (isolate() != NULL) { - code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), - isolate()); - } + allow_stub_calls_(true), + code_object_(Heap::undefined_value()) { } @@ -83,7 +80,7 @@ void MacroAssembler::Jump(Register target, Condition cond) { void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { #if USE_BX - mov(ip, Operand(target, rmode)); + mov(ip, Operand(target, rmode), LeaveCC, cond); bx(ip, cond); #else mov(pc, Operand(target, rmode), LeaveCC, cond); @@ -91,7 +88,7 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, } -void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, Condition cond) { ASSERT(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast<intptr_t>(target), rmode, cond); @@ -106,20 +103,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, } -int MacroAssembler::CallSize(Register target, Condition cond) { -#if USE_BLX - return kInstrSize; -#else - return 2 * kInstrSize; -#endif -} - - void MacroAssembler::Call(Register target, Condition cond) { - // Block constant pool for the call instruction sequence. 
- BlockConstPoolScope block_const_pool(this); - Label start; - bind(&start); #if USE_BLX blx(target, cond); #else @@ -127,78 +111,54 @@ void MacroAssembler::Call(Register target, Condition cond) { mov(lr, Operand(pc), LeaveCC, cond); mov(pc, Operand(target), LeaveCC, cond); #endif - ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); -} - - -int MacroAssembler::CallSize( - Address target, RelocInfo::Mode rmode, Condition cond) { - int size = 2 * kInstrSize; - Instr mov_instr = cond | MOV | LeaveCC; - intptr_t immediate = reinterpret_cast<intptr_t>(target); - if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) { - size += kInstrSize; - } - return size; } -void MacroAssembler::Call(Address target, - RelocInfo::Mode rmode, +void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, Condition cond) { - // Block constant pool for the call instruction sequence. - BlockConstPoolScope block_const_pool(this); - Label start; - bind(&start); #if USE_BLX // On ARMv5 and after the recommended call sequence is: // ldr ip, [pc, #...] // blx ip - // Statement positions are expected to be recorded when the target - // address is loaded. The mov method will automatically record - // positions when pc is the target, since this is not the case here - // we have to do it explicitly. - positions_recorder()->WriteRecordedPositions(); + // The two instructions (ldr and blx) could be separated by a constant + // pool and the code would still work. The issue comes from the + // patching code which expect the ldr to be just above the blx. + { BlockConstPoolScope block_const_pool(this); + // Statement positions are expected to be recorded when the target + // address is loaded. The mov method will automatically record + // positions when pc is the target, since this is not the case here + // we have to do it explicitly. + positions_recorder()->WriteRecordedPositions(); - mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); - blx(ip, cond); + mov(ip, Operand(target, rmode), LeaveCC, cond); + blx(ip, cond); + } ASSERT(kCallTargetAddressOffset == 2 * kInstrSize); #else // Set lr for return at current pc + 8. mov(lr, Operand(pc), LeaveCC, cond); // Emit a ldr<cond> pc, [pc + offset of target in constant pool]. 
- mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond); + mov(pc, Operand(target, rmode), LeaveCC, cond); + ASSERT(kCallTargetAddressOffset == kInstrSize); #endif - ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start)); } -int MacroAssembler::CallSize(Handle<Code> code, - RelocInfo::Mode rmode, - unsigned ast_id, - Condition cond) { - return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); +void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, + Condition cond) { + ASSERT(!RelocInfo::IsCodeTarget(rmode)); + Call(reinterpret_cast<intptr_t>(target), rmode, cond); } -void MacroAssembler::Call(Handle<Code> code, - RelocInfo::Mode rmode, - unsigned ast_id, +void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond) { - Label start; - bind(&start); ASSERT(RelocInfo::IsCodeTarget(rmode)); - if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) { - ASSERT(ast_id_for_reloc_info_ == kNoASTId); - ast_id_for_reloc_info_ = ast_id; - rmode = RelocInfo::CODE_TARGET_WITH_ID; - } // 'code' is always generated ARM code, never THUMB code - Call(reinterpret_cast<Address>(code.location()), rmode, cond); - ASSERT_EQ(CallSize(code, rmode, cond), SizeOfCodeGeneratedSince(&start)); + Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond); } @@ -245,29 +205,14 @@ void MacroAssembler::Call(Label* target) { } -void MacroAssembler::Push(Handle<Object> handle) { - mov(ip, Operand(handle)); - push(ip); -} - - void MacroAssembler::Move(Register dst, Handle<Object> value) { mov(dst, Operand(value)); } -void MacroAssembler::Move(Register dst, Register src, Condition cond) { +void MacroAssembler::Move(Register dst, Register src) { if (!dst.is(src)) { - mov(dst, src, LeaveCC, cond); - } -} - - -void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { - ASSERT(CpuFeatures::IsSupported(VFP3)); - CpuFeatures::Scope scope(VFP3); - if (!dst.is(src)) { - vmov(dst, src); + mov(dst, src); } } @@ -283,8 +228,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2, !src2.must_use_constant_pool() && CpuFeatures::IsSupported(ARMv7) && IsPowerOf2(src2.immediate() + 1)) { - ubfx(dst, src1, 0, - WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); + ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond); } else { and_(dst, src1, src2, LeaveCC, cond); @@ -392,6 +336,20 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, } +void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) { + // Empty the const pool. + CheckConstPool(true, true); + add(pc, pc, Operand(index, + LSL, + Instruction::kInstrSizeLog2 - kSmiTagSize)); + BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize); + nop(); // Jump table alignment. + for (int i = 0; i < targets.length(); i++) { + b(targets[i]); + } +} + + void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, Condition cond) { @@ -409,7 +367,7 @@ void MacroAssembler::StoreRoot(Register source, void MacroAssembler::RecordWriteHelper(Register object, Register address, Register scratch) { - if (emit_debug_code()) { + if (FLAG_debug_code) { // Check that the object is not in new space. 
Label not_in_new_space; InNewSpace(object, scratch, ne, &not_in_new_space); @@ -437,8 +395,8 @@ void MacroAssembler::InNewSpace(Register object, Condition cond, Label* branch) { ASSERT(cond == eq || cond == ne); - and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); - cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); + and_(scratch, object, Operand(ExternalReference::new_space_mask())); + cmp(scratch, Operand(ExternalReference::new_space_start())); b(cond, branch); } @@ -471,7 +429,7 @@ void MacroAssembler::RecordWrite(Register object, // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. - if (emit_debug_code()) { + if (FLAG_debug_code) { mov(object, Operand(BitCast<int32_t>(kZapValue))); mov(scratch0, Operand(BitCast<int32_t>(kZapValue))); mov(scratch1, Operand(BitCast<int32_t>(kZapValue))); @@ -503,7 +461,7 @@ void MacroAssembler::RecordWrite(Register object, // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. - if (emit_debug_code()) { + if (FLAG_debug_code) { mov(object, Operand(BitCast<int32_t>(kZapValue))); mov(address, Operand(BitCast<int32_t>(kZapValue))); mov(scratch, Operand(BitCast<int32_t>(kZapValue))); @@ -594,36 +552,19 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2, ASSERT_EQ(0, dst1.code() % 2); ASSERT_EQ(dst1.code() + 1, dst2.code()); - // V8 does not use this addressing mode, so the fallback code - // below doesn't support it yet. - ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); - // Generate two ldr instructions if ldrd is not available. if (CpuFeatures::IsSupported(ARMv7)) { CpuFeatures::Scope scope(ARMv7); ldrd(dst1, dst2, src, cond); } else { - if ((src.am() == Offset) || (src.am() == NegOffset)) { - MemOperand src2(src); - src2.set_offset(src2.offset() + 4); - if (dst1.is(src.rn())) { - ldr(dst2, src2, cond); - ldr(dst1, src, cond); - } else { - ldr(dst1, src, cond); - ldr(dst2, src2, cond); - } - } else { // PostIndex or NegPostIndex. - ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex)); - if (dst1.is(src.rn())) { - ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); - ldr(dst1, src, cond); - } else { - MemOperand src2(src); - src2.set_offset(src2.offset() - 4); - ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond); - ldr(dst2, src2, cond); - } + MemOperand src2(src); + src2.set_offset(src2.offset() + 4); + if (dst1.is(src.rn())) { + ldr(dst2, src2, cond); + ldr(dst1, src, cond); + } else { + ldr(dst1, src, cond); + ldr(dst2, src2, cond); } } } @@ -636,26 +577,15 @@ void MacroAssembler::Strd(Register src1, Register src2, ASSERT_EQ(0, src1.code() % 2); ASSERT_EQ(src1.code() + 1, src2.code()); - // V8 does not use this addressing mode, so the fallback code - // below doesn't support it yet. - ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); - // Generate two str instructions if strd is not available. if (CpuFeatures::IsSupported(ARMv7)) { CpuFeatures::Scope scope(ARMv7); strd(src1, src2, dst, cond); } else { MemOperand dst2(dst); - if ((dst.am() == Offset) || (dst.am() == NegOffset)) { - dst2.set_offset(dst2.offset() + 4); - str(src1, dst, cond); - str(src2, dst2, cond); - } else { // PostIndex or NegPostIndex.
- ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); - dst2.set_offset(dst2.offset() - 4); - str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); - str(src2, dst2, cond); - } + dst2.set_offset(dst2.offset() + 4); + str(src1, dst, cond); + str(src2, dst2, cond); } } @@ -702,23 +632,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } -void MacroAssembler::Vmov(const DwVfpRegister dst, - const double imm, - const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP3)); - static const DoubleRepresentation minus_zero(-0.0); - static const DoubleRepresentation zero(0.0); - DoubleRepresentation value(imm); - // Handle special values first. - if (value.bits == zero.bits) { - vmov(dst, kDoubleRegZero, cond); - } else if (value.bits == minus_zero.bits) { - vneg(dst, kDoubleRegZero, cond); - } else { - vmov(dst, imm, cond); - } -} - void MacroAssembler::EnterFrame(StackFrame::Type type) { // r0-r3: preserved @@ -752,7 +665,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { mov(fp, Operand(sp)); // Setup new frame pointer. // Reserve room for saved entry sp and code object. sub(sp, sp, Operand(2 * kPointerSize)); - if (emit_debug_code()) { + if (FLAG_debug_code) { mov(ip, Operand(0)); str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); } @@ -760,17 +673,19 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); // Save the frame pointer and the context in top. - mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); + mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); str(fp, MemOperand(ip)); - mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate()))); + mov(ip, Operand(ExternalReference(Top::k_context_address))); str(cp, MemOperand(ip)); // Optionally save all double registers. if (save_doubles) { - DwVfpRegister first = d0; - DwVfpRegister last = - DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); - vstm(db_w, sp, first, last); + sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize)); + const int offset = -2 * kPointerSize; + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + vstr(reg, fp, offset - ((i + 1) * kDoubleSize)); + } // Note that d0 will be accessible at // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize, // since the sp slot and code slot were pushed after the fp. @@ -827,22 +742,20 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count) { // Optionally restore all double registers. if (save_doubles) { - // Calculate the stack location of the saved doubles and restore them. - const int offset = 2 * kPointerSize; - sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize)); - DwVfpRegister first = d0; - DwVfpRegister last = - DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); - vldm(ia, r3, first, last); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + const int offset = -2 * kPointerSize; + vldr(reg, fp, offset - ((i + 1) * kDoubleSize)); + } } // Clear top frame. mov(r3, Operand(0, RelocInfo::NONE)); - mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); + mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); str(r3, MemOperand(ip)); // Restore current context from top and clear it in debug mode. 
- mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate()))); + mov(ip, Operand(ExternalReference(Top::k_context_address))); ldr(cp, MemOperand(ip)); #ifdef DEBUG str(r3, MemOperand(ip)); #endif @@ -857,25 +770,11 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, } void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { - if (use_eabi_hardfloat()) { - Move(dst, d0); - } else { - vmov(dst, r0, r1); - } -} - - -void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { - // This macro takes the dst register to make the code more readable - // at the call sites. However, the dst register has to be r5 to - // follow the calling convention which requires the call type to be - // in r5. - ASSERT(dst.is(r5)); - if (call_kind == CALL_AS_FUNCTION) { - mov(dst, Operand(Smi::FromInt(1))); - } else { - mov(dst, Operand(Smi::FromInt(0))); - } +#if !defined(USE_ARM_EABI) + UNREACHABLE(); +#else + vmov(dst, r0, r1); +#endif } @@ -885,8 +784,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Register code_reg, Label* done, InvokeFlag flag, - const CallWrapper& call_wrapper, - CallKind call_kind) { + PostCallGenerator* post_call_generator) { bool definitely_matches = false; Label regular_invoke; @@ -939,15 +837,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } Handle<Code> adaptor = - isolate()->builtins()->ArgumentsAdaptorTrampoline(); + Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); if (flag == CALL_FUNCTION) { - call_wrapper.BeforeCall(CallSize(adaptor)); - SetCallKind(r5, call_kind); - Call(adaptor); - call_wrapper.AfterCall(); + Call(adaptor, RelocInfo::CODE_TARGET); + if (post_call_generator != NULL) post_call_generator->Generate(); b(done); } else { - SetCallKind(r5, call_kind); Jump(adaptor, RelocInfo::CODE_TARGET); } bind(&regular_invoke); @@ -959,20 +854,16 @@ void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, - const CallWrapper& call_wrapper, - CallKind call_kind) { + PostCallGenerator* post_call_generator) { Label done; InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, - call_wrapper, call_kind); + post_call_generator); if (flag == CALL_FUNCTION) { - call_wrapper.BeforeCall(CallSize(code)); - SetCallKind(r5, call_kind); Call(code); - call_wrapper.AfterCall(); + if (post_call_generator != NULL) post_call_generator->Generate(); } else { ASSERT(flag == JUMP_FUNCTION); - SetCallKind(r5, call_kind); Jump(code); } @@ -986,17 +877,13 @@ void MacroAssembler::InvokeCode(Handle<Code> code, const ParameterCount& expected, const ParameterCount& actual, RelocInfo::Mode rmode, - InvokeFlag flag, - CallKind call_kind) { + InvokeFlag flag) { Label done; - InvokePrologue(expected, actual, code, no_reg, &done, flag, - NullCallWrapper(), call_kind); + InvokePrologue(expected, actual, code, no_reg, &done, flag); if (flag == CALL_FUNCTION) { - SetCallKind(r5, call_kind); Call(code, rmode); } else { - SetCallKind(r5, call_kind); Jump(code, rmode); } @@ -1009,8 +896,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code, void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual, InvokeFlag flag, - const CallWrapper& call_wrapper, - CallKind call_kind) { + PostCallGenerator* post_call_generator) { // Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1)); @@ -1027,14 +913,13 @@ void MacroAssembler::InvokeFunction(Register fun, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); ParameterCount expected(expected_reg); - InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind); + InvokeCode(code_reg, expected, actual, flag, post_call_generator); } void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, - InvokeFlag flag, - CallKind call_kind) { + InvokeFlag flag) { ASSERT(function->is_compiled()); // Get the function and setup the context. @@ -1049,9 +934,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, // code field in the function to allow recompilation to take effect // without changing any of the call sites. ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); - InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind); + InvokeCode(r3, expected, actual, flag); } else { - InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind); + InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag); } } @@ -1069,9 +954,9 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch, Label* fail) { ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); - cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); b(lt, fail); - cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); + cmp(scratch, Operand(LAST_JS_OBJECT_TYPE)); b(gt, fail); } @@ -1092,7 +977,7 @@ void MacroAssembler::IsObjectJSStringType(Register object, void MacroAssembler::DebugBreak() { ASSERT(allow_stub_calls()); mov(r0, Operand(0, RelocInfo::NONE)); - mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); + mov(r1, Operand(ExternalReference(Runtime::kDebugBreak))); CEntryStub ces(1); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } @@ -1115,7 +1000,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, && StackHandlerConstants::kPCOffset == 3 * kPointerSize); stm(db_w, sp, r3.bit() | fp.bit() | lr.bit()); // Save the current handler as the next handler. - mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); + mov(r3, Operand(ExternalReference(Top::k_handler_address))); ldr(r1, MemOperand(r3)); ASSERT(StackHandlerConstants::kNextOffset == 0); push(r1); @@ -1134,7 +1019,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, && StackHandlerConstants::kPCOffset == 3 * kPointerSize); stm(db_w, sp, r6.bit() | ip.bit() | lr.bit()); // Save the current handler as the next handler. - mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); + mov(r7, Operand(ExternalReference(Top::k_handler_address))); ldr(r6, MemOperand(r7)); ASSERT(StackHandlerConstants::kNextOffset == 0); push(r6); @@ -1147,7 +1032,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, void MacroAssembler::PopTryHandler() { ASSERT_EQ(0, StackHandlerConstants::kNextOffset); pop(r1); - mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); + mov(ip, Operand(ExternalReference(Top::k_handler_address))); add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); str(r1, MemOperand(ip)); } @@ -1163,7 +1048,7 @@ void MacroAssembler::Throw(Register value) { STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); // Drop the sp to the top of the handler. 
- mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); + mov(r3, Operand(ExternalReference(Top::k_handler_address))); ldr(sp, MemOperand(r3)); // Restore the next handler and frame pointer, discard handler state. @@ -1182,7 +1067,7 @@ void MacroAssembler::Throw(Register value) { // Restore cp otherwise. ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); #ifdef DEBUG - if (emit_debug_code()) { + if (FLAG_debug_code) { mov(lr, Operand(pc)); } #endif @@ -1202,7 +1087,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, } // Drop sp to the top stack handler. - mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); + mov(r3, Operand(ExternalReference(Top::k_handler_address))); ldr(sp, MemOperand(r3)); // Unwind the handlers until the ENTRY handler is found. @@ -1226,8 +1111,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, if (type == OUT_OF_MEMORY) { // Set external caught exception to false. - ExternalReference external_caught( - Isolate::k_external_caught_exception_address, isolate()); + ExternalReference external_caught(Top::k_external_caught_exception_address); mov(r0, Operand(false, RelocInfo::NONE)); mov(r2, Operand(external_caught)); str(r0, MemOperand(r2)); @@ -1235,8 +1119,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, // Set pending exception and r0 to out of memory exception. Failure* out_of_memory = Failure::OutOfMemoryException(); mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); - mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address, - isolate()))); + mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); str(r0, MemOperand(r2)); } @@ -1257,7 +1140,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, // Restore cp otherwise. ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); #ifdef DEBUG - if (emit_debug_code()) { + if (FLAG_debug_code) { mov(lr, Operand(pc)); } #endif @@ -1289,7 +1172,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check the context is a global context. - if (emit_debug_code()) { + if (FLAG_debug_code) { // TODO(119): avoid push(holder_reg)/pop(holder_reg) // Cannot use ip as a temporary in this verification code. Due to the fact // that ip is clobbered as part of cmp with an object Operand. @@ -1308,7 +1191,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, b(eq, &same_contexts); // Check the context is a global context. - if (emit_debug_code()) { + if (FLAG_debug_code) { // TODO(119): avoid push(holder_reg)/pop(holder_reg) // Cannot use ip as a temporary in this verification code. Due to the fact // that ip is clobbered as part of cmp with an object Operand. @@ -1350,7 +1233,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size, Label* gc_required, AllocationFlags flags) { if (!FLAG_inline_new) { - if (emit_debug_code()) { + if (FLAG_debug_code) { // Trash the registers to simulate an allocation failure. mov(result, Operand(0x7091)); mov(scratch1, Operand(0x7191)); @@ -1363,8 +1246,6 @@ void MacroAssembler::AllocateInNewSpace(int object_size, ASSERT(!result.is(scratch1)); ASSERT(!result.is(scratch2)); ASSERT(!scratch1.is(scratch2)); - ASSERT(!scratch1.is(ip)); - ASSERT(!scratch2.is(ip)); // Make object size into bytes. 
if ((flags & SIZE_IN_WORDS) != 0) { @@ -1377,9 +1258,9 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Also, assert that the registers are numbered such that the values // are loaded in the correct order. ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference::new_space_allocation_top_address(); ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate()); + ExternalReference::new_space_allocation_limit_address(); intptr_t top = reinterpret_cast<intptr_t>(new_space_allocation_top.address()); intptr_t limit = @@ -1399,7 +1280,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Load allocation top into result and allocation limit into ip. ldm(ia, topaddr, result.bit() | ip.bit()); } else { - if (emit_debug_code()) { + if (FLAG_debug_code) { // Assert that result actually contains top on entry. ip is used // immediately below so this use of ip does not cause difference with // respect to register content between debug and release mode. @@ -1433,7 +1314,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, Label* gc_required, AllocationFlags flags) { if (!FLAG_inline_new) { - if (emit_debug_code()) { + if (FLAG_debug_code) { // Trash the registers to simulate an allocation failure. mov(result, Operand(0x7091)); mov(scratch1, Operand(0x7191)); @@ -1457,9 +1338,9 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, // Also, assert that the registers are numbered such that the values // are loaded in the correct order. ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference::new_space_allocation_top_address(); ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate()); + ExternalReference::new_space_allocation_limit_address(); intptr_t top = reinterpret_cast<intptr_t>(new_space_allocation_top.address()); intptr_t limit = @@ -1477,7 +1358,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, // Load allocation top into result and allocation limit into ip. ldm(ia, topaddr, result.bit() | ip.bit()); } else { - if (emit_debug_code()) { + if (FLAG_debug_code) { // Assert that result actually contains top on entry. ip is used // immediately below so this use of ip does not cause difference with // respect to register content between debug and release mode. @@ -1502,7 +1383,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, b(hi, gc_required); // Update allocation top. result temporarily holds the new top. - if (emit_debug_code()) { + if (FLAG_debug_code) { tst(scratch2, Operand(kObjectAlignmentMask)); Check(eq, "Unaligned allocation in new space"); } @@ -1518,7 +1399,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, void MacroAssembler::UndoAllocationInNewSpace(Register object, Register scratch) { ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference::new_space_allocation_top_address(); // Make sure the object has no tag before resetting top. 
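The AllocateInNewSpace fast path in this hunk is a bump-pointer allocation against the new-space top and limit words named by the two ExternalReferences above. A standalone sketch of the sequence the macro assembler emits (tagging and the address-wraparound check are left out):

#include <cstdint>
#include <cstddef>

struct NewSpace {
  uintptr_t top;    // what new_space_allocation_top_address() points at
  uintptr_t limit;  // what new_space_allocation_limit_address() points at
};

// Returns the address of the new object, or 0 when the caller must branch to
// gc_required instead.
uintptr_t AllocateInNewSpace(NewSpace* space, size_t object_size_in_bytes) {
  uintptr_t result = space->top;                       // ldm topaddr, {result, ip}
  uintptr_t new_top = result + object_size_in_bytes;   // add scratch2, result, obj_size
  if (new_top > space->limit) return 0;                // cmp scratch2, ip; b(hi, gc_required)
  space->top = new_top;                                // str scratch2, [topaddr]
  return result;                                       // caller tags the result if requested
}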
and_(object, object, Operand(~kHeapObjectTagMask)); @@ -1654,30 +1535,12 @@ void MacroAssembler::CompareInstanceType(Register map, } -void MacroAssembler::CompareRoot(Register obj, - Heap::RootListIndex index) { - ASSERT(!obj.is(ip)); - LoadRoot(ip, index); - cmp(obj, ip); -} - - -void MacroAssembler::CheckFastElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0); - ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); - b(hi, fail); -} - - void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail, - SmiCheckType smi_check_type) { - if (smi_check_type == DO_SMI_CHECK) { + bool is_heap_object) { + if (!is_heap_object) { JumpIfSmi(obj, fail); } ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); @@ -1691,8 +1554,8 @@ void MacroAssembler::CheckMap(Register obj, Register scratch, Heap::RootListIndex index, Label* fail, - SmiCheckType smi_check_type) { - if (smi_check_type == DO_SMI_CHECK) { + bool is_heap_object) { + if (!is_heap_object) { JumpIfSmi(obj, fail); } ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); @@ -1702,23 +1565,6 @@ void MacroAssembler::CheckMap(Register obj, } -void MacroAssembler::DispatchMap(Register obj, - Register scratch, - Handle<Map> map, - Handle<Code> success, - SmiCheckType smi_check_type) { - Label fail; - if (smi_check_type == DO_SMI_CHECK) { - JumpIfSmi(obj, &fail); - } - ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - mov(ip, Operand(map)); - cmp(scratch, ip); - Jump(success, RelocInfo::CODE_TARGET, eq); - bind(&fail); -} - - void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, Register scratch, @@ -1772,17 +1618,6 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { } -MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. - Object* result; - { MaybeObject* maybe_result = stub->TryGetCode(); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond); - return result; -} - - void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); @@ -1795,7 +1630,7 @@ MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; } - Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond); + Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); return result; } @@ -1844,7 +1679,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( // No more valid handles (the result handle was the last one). Restore // previous handle scope. str(r4, MemOperand(r7, kNextOffset)); - if (emit_debug_code()) { + if (FLAG_debug_code) { ldr(r1, MemOperand(r7, kLevelOffset)); cmp(r1, r6); Check(eq, "Unexpected level after return from api call"); @@ -1858,7 +1693,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( // Check if the function scheduled an exception. 
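The CheckMap hunks above swap the SmiCheckType argument of the newer code for the bool is_heap_object flag used in 3.1.8; the two encode the same pre-check with opposite polarity. A small shim, written only to make the mapping explicit (the helper name is invented for illustration):

enum SmiCheckType { DONT_DO_SMI_CHECK, DO_SMI_CHECK };

// DO_SMI_CHECK corresponds to is_heap_object == false (the object might be a
// smi, so JumpIfSmi is emitted); DONT_DO_SMI_CHECK corresponds to
// is_heap_object == true (the smi check is skipped).
inline bool ToIsHeapObject(SmiCheckType smi_check_type) {
  return smi_check_type == DONT_DO_SMI_CHECK;
}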
bind(&leave_exit_frame); LoadRoot(r4, Heap::kTheHoleValueRootIndex); - mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); + mov(ip, Operand(ExternalReference::scheduled_exception_address())); ldr(r5, MemOperand(ip)); cmp(r4, r5); b(ne, &promote_scheduled_exception); @@ -1869,11 +1704,8 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( mov(pc, lr); bind(&promote_scheduled_exception); - MaybeObject* result - = TryTailCallExternalReference( - ExternalReference(Runtime::kPromoteScheduledException, isolate()), - 0, - 1); + MaybeObject* result = TryTailCallExternalReference( + ExternalReference(Runtime::kPromoteScheduledException), 0, 1); if (result->IsFailure()) { return result; } @@ -1882,10 +1714,8 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( bind(&delete_allocated_handles); str(r5, MemOperand(r7, kLimitOffset)); mov(r4, r0); - PrepareCallCFunction(1, r5); - mov(r0, Operand(ExternalReference::isolate_address())); - CallCFunction( - ExternalReference::delete_handle_scope_extensions(isolate()), 1); + PrepareCallCFunction(0, r5); + CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0); mov(r0, r4); jmp(&leave_exit_frame); @@ -2122,121 +1952,6 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, } -void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, - Register input_high, - Register input_low, - Register scratch) { - Label done, normal_exponent, restore_sign; - - // Extract the biased exponent in result. - Ubfx(result, - input_high, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // Check for Infinity and NaNs, which should return 0. - cmp(result, Operand(HeapNumber::kExponentMask)); - mov(result, Operand(0), LeaveCC, eq); - b(eq, &done); - - // Express exponent as delta to (number of mantissa bits + 31). - sub(result, - result, - Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31), - SetCC); - - // If the delta is strictly positive, all bits would be shifted away, - // which means that we can return 0. - b(le, &normal_exponent); - mov(result, Operand(0)); - b(&done); - - bind(&normal_exponent); - const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; - // Calculate shift. - add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC); - - // Save the sign. - Register sign = result; - result = no_reg; - and_(sign, input_high, Operand(HeapNumber::kSignMask)); - - // Set the implicit 1 before the mantissa part in input_high. - orr(input_high, - input_high, - Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - // Shift the mantissa bits to the correct position. - // We don't need to clear non-mantissa bits as they will be shifted away. - // If they weren't, it would mean that the answer is in the 32bit range. - mov(input_high, Operand(input_high, LSL, scratch)); - - // Replace the shifted bits with bits from the lower mantissa word. - Label pos_shift, shift_done; - rsb(scratch, scratch, Operand(32), SetCC); - b(&pos_shift, ge); - - // Negate scratch. - rsb(scratch, scratch, Operand(0)); - mov(input_low, Operand(input_low, LSL, scratch)); - b(&shift_done); - - bind(&pos_shift); - mov(input_low, Operand(input_low, LSR, scratch)); - - bind(&shift_done); - orr(input_high, input_high, Operand(input_low)); - // Restore sign if necessary. 
- cmp(sign, Operand(0)); - result = sign; - sign = no_reg; - rsb(result, input_high, Operand(0), LeaveCC, ne); - mov(result, input_high, LeaveCC, eq); - bind(&done); -} - - -void MacroAssembler::EmitECMATruncate(Register result, - DwVfpRegister double_input, - SwVfpRegister single_scratch, - Register scratch, - Register input_high, - Register input_low) { - CpuFeatures::Scope scope(VFP3); - ASSERT(!input_high.is(result)); - ASSERT(!input_low.is(result)); - ASSERT(!input_low.is(input_high)); - ASSERT(!scratch.is(result) && - !scratch.is(input_high) && - !scratch.is(input_low)); - ASSERT(!single_scratch.is(double_input.low()) && - !single_scratch.is(double_input.high())); - - Label done; - - // Clear cumulative exception flags. - ClearFPSCRBits(kVFPExceptionMask, scratch); - // Try a conversion to a signed integer. - vcvt_s32_f64(single_scratch, double_input); - vmov(result, single_scratch); - // Retrieve he FPSCR. - vmrs(scratch); - // Check for overflow and NaNs. - tst(scratch, Operand(kVFPOverflowExceptionBit | - kVFPUnderflowExceptionBit | - kVFPInvalidOpExceptionBit)); - // If we had no exceptions we are done. - b(eq, &done); - - // Load the double value and perform a manual truncation. - vmov(input_low, input_high, double_input); - EmitOutOfInt32RangeTruncate(result, - input_high, - input_low, - scratch); - bind(&done); -} - - void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits) { @@ -2256,8 +1971,7 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, } -void MacroAssembler::CallRuntime(const Runtime::Function* f, - int num_arguments) { +void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { // All parameters are on the stack. r0 has the return value after call. // If the expected number of arguments of the runtime function is @@ -2273,7 +1987,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // should remove this need and make the runtime routine entry code // smarter. 
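The two helpers removed just above, EmitOutOfInt32RangeTruncate and EmitECMATruncate, implement the ECMA-262 ToInt32 truncation for doubles that the VFP conversion cannot handle directly. A portable sketch of the same result (not the bit-level ARM sequence), assuming the usual two's-complement int32_t:

#include <cmath>
#include <cstdint>

// ECMA-262 9.5 ToInt32: NaN and infinities become 0; finite values are
// truncated toward zero and reduced modulo 2^32, then reinterpreted as a
// signed 32-bit integer.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);                // round toward zero
  double modulo = std::fmod(truncated, 4294967296.0);  // keep |result| < 2^32
  // The unsigned conversion performs the final wrap-around; the signed cast
  // assumes two's-complement representation.
  return static_cast<int32_t>(static_cast<uint32_t>(static_cast<int64_t>(modulo)));
}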
mov(r0, Operand(num_arguments)); - mov(r1, Operand(ExternalReference(f, isolate()))); + mov(r1, Operand(ExternalReference(f))); CEntryStub stub(1); CallStub(&stub); } @@ -2285,9 +1999,9 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { - const Runtime::Function* function = Runtime::FunctionForId(id); + Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); - mov(r1, Operand(ExternalReference(function, isolate()))); + mov(r1, Operand(ExternalReference(function))); CEntryStub stub(1); stub.SaveDoubles(); CallStub(&stub); @@ -2330,9 +2044,7 @@ MaybeObject* MacroAssembler::TryTailCallExternalReference( void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size) { - TailCallExternalReference(ExternalReference(fid, isolate()), - num_arguments, - result_size); + TailCallExternalReference(ExternalReference(fid), num_arguments, result_size); } @@ -2360,17 +2072,14 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, - InvokeFlag flag, - const CallWrapper& call_wrapper) { + InvokeJSFlags flags, + PostCallGenerator* post_call_generator) { GetBuiltinEntry(r2, id); - if (flag == CALL_FUNCTION) { - call_wrapper.BeforeCall(CallSize(r2)); - SetCallKind(r5, CALL_AS_METHOD); + if (flags == CALL_JS) { Call(r2); - call_wrapper.AfterCall(); + if (post_call_generator != NULL) post_call_generator->Generate(); } else { - ASSERT(flag == JUMP_FUNCTION); - SetCallKind(r5, CALL_AS_METHOD); + ASSERT(flags == JUMP_JS); Jump(r2); } } @@ -2430,14 +2139,14 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, void MacroAssembler::Assert(Condition cond, const char* msg) { - if (emit_debug_code()) + if (FLAG_debug_code) Check(cond, msg); } void MacroAssembler::AssertRegisterIsRoot(Register reg, Heap::RootListIndex index) { - if (emit_debug_code()) { + if (FLAG_debug_code) { LoadRoot(ip, index); cmp(reg, ip); Check(eq, "Register did not match expected root"); @@ -2446,7 +2155,7 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg, void MacroAssembler::AssertFastElements(Register elements) { - if (emit_debug_code()) { + if (FLAG_debug_code) { ASSERT(!elements.is(ip)); Label ok; push(elements); @@ -2516,9 +2225,12 @@ void MacroAssembler::Abort(const char* msg) { void MacroAssembler::LoadContext(Register dst, int context_chain_length) { if (context_chain_length > 0) { // Move up the chain of contexts to the context containing the slot. - ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); + ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX))); + // Load the function context (which is the incoming, outer context). + ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); for (int i = 1; i < context_chain_length; i++) { - ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); + ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); + ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); } } else { // Slot is in the current function context. Move it into the @@ -2526,6 +2238,17 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) { // cannot be allowed to destroy the context in esi). mov(dst, cp); } + + // We should not have found a 'with' context by walking the context chain + // (i.e., the static scope chain and runtime context chain do not agree). 
+ // A variable occurring in such a scope should have slot type LOOKUP and + // not CONTEXT. + if (FLAG_debug_code) { + ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); + cmp(dst, ip); + Check(eq, "Yo dawg, I heard you liked function contexts " + "so I put function contexts in all your contexts"); + } } @@ -2545,9 +2268,9 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register scratch) { // Load the initial map. The global functions all have initial maps. ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); - if (emit_debug_code()) { + if (FLAG_debug_code) { Label ok, fail; - CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); + CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false); b(&ok); bind(&fail); Abort("Global functions must have initial map"); @@ -2567,18 +2290,6 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZero( } -void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( - Register reg, - Register scratch, - Label* zero_and_neg, - Label* not_power_of_two) { - sub(scratch, reg, Operand(1), SetCC); - b(mi, zero_and_neg); - tst(scratch, reg); - b(ne, not_power_of_two); -} - - void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi) { @@ -2629,7 +2340,9 @@ void MacroAssembler::AbortIfNotString(Register object) { void MacroAssembler::AbortIfNotRootValue(Register src, Heap::RootListIndex root_value_index, const char* message) { - CompareRoot(src, root_value_index); + ASSERT(!src.is(ip)); + LoadRoot(ip, root_value_index); + cmp(src, ip); Assert(eq, message); } @@ -2673,7 +2386,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, // Check that neither is a smi. STATIC_ASSERT(kSmiTag == 0); and_(scratch1, first, Operand(second)); - JumpIfSmi(scratch1, failure); + tst(scratch1, Operand(kSmiTagMask)); + b(eq, failure); JumpIfNonSmisNotBothSequentialAsciiStrings(first, second, scratch1, @@ -2764,7 +2478,7 @@ void MacroAssembler::CopyBytes(Register src, // Copy bytes in word size chunks. bind(&word_loop); - if (emit_debug_code()) { + if (FLAG_debug_code) { tst(src, Operand(kPointerSize - 1)); Assert(eq, "Expecting alignment for CopyBytes"); } @@ -2863,38 +2577,11 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, b(ne, failure); } -static const int kRegisterPassedArguments = 4; - -int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments) { - int stack_passed_words = 0; - if (use_eabi_hardfloat()) { - // In the hard floating point calling convention, we can use - // all double registers to pass doubles. - if (num_double_arguments > DoubleRegister::kNumRegisters) { - stack_passed_words += - 2 * (num_double_arguments - DoubleRegister::kNumRegisters); - } - } else { - // In the soft floating point calling convention, every double - // argument is passed using two registers. - num_reg_arguments += 2 * num_double_arguments; - } - // Up to four simple arguments are passed in registers r0..r3. 
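The LoadContext hunk above replaces the newer walk over PREVIOUS_INDEX links with the 3.1.8 walk through the closure stored at CLOSURE_INDEX (plus the debug-only FCONTEXT_INDEX consistency check). A toy model of the two walks, with none of V8's real object layout assumed:

struct Context;
struct Closure { Context* context; };   // JSFunction::kContextOffset
struct Context {
  Context* previous;  // PREVIOUS_INDEX slot (newer code)
  Closure* closure;   // CLOSURE_INDEX slot (3.1.8 code)
};

Context* LoadContext(Context* cp, int context_chain_length, bool use_previous_link) {
  Context* dst = cp;
  for (int i = 0; i < context_chain_length; i++) {
    dst = use_previous_link ? dst->previous            // ldr dst, [dst, PREVIOUS_INDEX]
                            : dst->closure->context;   // ldr CLOSURE_INDEX; ldr kContextOffset
  }
  return dst;  // length 0: the slot is in the current context (mov dst, cp)
}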
- if (num_reg_arguments > kRegisterPassedArguments) { - stack_passed_words += num_reg_arguments - kRegisterPassedArguments; - } - return stack_passed_words; -} - - -void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, - int num_double_arguments, - Register scratch) { +void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); - int stack_passed_arguments = CalculateStackPassedWords( - num_reg_arguments, num_double_arguments); + // Up to four simple arguments are passed in registers r0..r3. + int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; if (frame_alignment > kPointerSize) { // Make stack end at alignment and make room for num_arguments - 4 words // and the original value of sp. @@ -2909,97 +2596,19 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, } -void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, - Register scratch) { - PrepareCallCFunction(num_reg_arguments, 0, scratch); -} - - -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { - if (use_eabi_hardfloat()) { - Move(d0, dreg); - } else { - vmov(r0, r1, dreg); - } -} - - -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, - DoubleRegister dreg2) { - if (use_eabi_hardfloat()) { - if (dreg2.is(d0)) { - ASSERT(!dreg1.is(d1)); - Move(d1, dreg2); - Move(d0, dreg1); - } else { - Move(d0, dreg1); - Move(d1, dreg2); - } - } else { - vmov(r0, r1, dreg1); - vmov(r2, r3, dreg2); - } -} - - -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, - Register reg) { - if (use_eabi_hardfloat()) { - Move(d0, dreg); - Move(r0, reg); - } else { - Move(r2, reg); - vmov(r0, r1, dreg); - } -} - - void MacroAssembler::CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(no_reg, - function, - ip, - num_reg_arguments, - num_double_arguments); -} - - -void MacroAssembler::CallCFunction(Register function, - Register scratch, - int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(function, - ExternalReference::the_hole_value_location(isolate()), - scratch, - num_reg_arguments, - num_double_arguments); -} - - -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments) { - CallCFunction(function, num_arguments, 0); -} - - -void MacroAssembler::CallCFunction(Register function, - Register scratch, int num_arguments) { - CallCFunction(function, scratch, num_arguments, 0); + mov(ip, Operand(function)); + CallCFunction(ip, num_arguments); } -void MacroAssembler::CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, - int num_reg_arguments, - int num_double_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. #if defined(V8_HOST_ARCH_ARM) - if (emit_debug_code()) { + if (FLAG_debug_code) { int frame_alignment = OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { @@ -3018,14 +2627,9 @@ void MacroAssembler::CallCFunctionHelper(Register function, // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. 
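CalculateStackPassedWords, removed in the hunk above, decides how many argument words spill to the stack for a C call: with the EABI hard-float ABI doubles travel in dedicated double registers, with the soft-float ABI each double consumes two core-register slots, and only the first four word-sized arguments fit in r0..r3. A standalone restatement (the double-register count stands in for DoubleRegister::kNumRegisters and is an assumption here):

int CalculateStackPassedWords(int num_reg_arguments,
                              int num_double_arguments,
                              bool use_eabi_hardfloat) {
  const int kRegisterPassedArguments = 4;   // r0..r3
  const int kNumDoubleRegisters = 16;       // DoubleRegister::kNumRegisters (assumed value)
  int stack_passed_words = 0;
  if (use_eabi_hardfloat) {
    // Hard-float: doubles use the double registers; only the overflow spills.
    if (num_double_arguments > kNumDoubleRegisters) {
      stack_passed_words += 2 * (num_double_arguments - kNumDoubleRegisters);
    }
  } else {
    // Soft-float: every double is passed in two core-register argument slots.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}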
- if (function.is(no_reg)) { - mov(scratch, Operand(function_reference)); - function = scratch; - } Call(function); - int stack_passed_arguments = CalculateStackPassedWords( - num_reg_arguments, num_double_arguments); - if (ActivationFrameAlignment() > kPointerSize) { + int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4; + if (OS::ActivationFrameAlignment() > kPointerSize) { ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); @@ -3038,7 +2642,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, const uint32_t kLdrOffsetMask = (1 << 12) - 1; const int32_t kPCRegOffset = 2 * kPointerSize; ldr(result, MemOperand(ldr_location)); - if (emit_debug_code()) { + if (FLAG_debug_code) { // Check that the instruction is a ldr reg, [pc + offset] . and_(result, result, Operand(kLdrPCPattern)); cmp(result, Operand(kLdrPCPattern)); @@ -3053,60 +2657,11 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, } -void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { - Usat(output_reg, 8, Operand(input_reg)); -} - - -void MacroAssembler::ClampDoubleToUint8(Register result_reg, - DoubleRegister input_reg, - DoubleRegister temp_double_reg) { - Label above_zero; - Label done; - Label in_bounds; - - Vmov(temp_double_reg, 0.0); - VFPCompareAndSetFlags(input_reg, temp_double_reg); - b(gt, &above_zero); - - // Double value is less than zero, NaN or Inf, return 0. - mov(result_reg, Operand(0)); - b(al, &done); - - // Double value is >= 255, return 255. - bind(&above_zero); - Vmov(temp_double_reg, 255.0); - VFPCompareAndSetFlags(input_reg, temp_double_reg); - b(le, &in_bounds); - mov(result_reg, Operand(255)); - b(al, &done); - - // In 0-255 range, round and truncate. - bind(&in_bounds); - Vmov(temp_double_reg, 0.5); - vadd(temp_double_reg, input_reg, temp_double_reg); - vcvt_u32_f64(s0, temp_double_reg); - vmov(result_reg, s0); - bind(&done); -} - - -void MacroAssembler::LoadInstanceDescriptors(Register map, - Register descriptors) { - ldr(descriptors, - FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset)); - Label not_smi; - JumpIfNotSmi(descriptors, ¬_smi); - mov(descriptors, Operand(FACTORY->empty_descriptor_array())); - bind(¬_smi); -} - - CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), size_(instructions * Assembler::kInstrSize), - masm_(Isolate::Current(), address, size_ + Assembler::kGap) { + masm_(address, size_ + Assembler::kGap) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 1918858ebe..acd1d79b7c 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -29,11 +29,13 @@ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #include "assembler.h" -#include "v8globals.h" namespace v8 { namespace internal { +// Forward declaration. 
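ClampDoubleToUint8, also removed above, clamps a double into the 0..255 byte range with round-half-up behaviour (NaN goes to 0). A portable sketch of the same result, not the VFP sequence itself:

#include <cstdint>

uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;               // NaN, zero and negatives clamp to 0
  if (value >= 255.0) return 255;             // large values clamp to 255
  return static_cast<uint8_t>(value + 0.5);   // add 0.5, then truncate (vcvt_u32_f64)
}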
+class PostCallGenerator; + // ---------------------------------------------------------------------------- // Static helper functions @@ -53,6 +55,12 @@ static inline Operand SmiUntagOperand(Register object) { const Register cp = { 8 }; // JavaScript context pointer const Register roots = { 10 }; // Roots array pointer. +enum InvokeJSFlags { + CALL_JS, + JUMP_JS +}; + + // Flags used for the AllocateInNewSpace functions. enum AllocationFlags { // No special flags. @@ -82,28 +90,15 @@ enum ObjectToDoubleFlags { // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: - // The isolate parameter can be NULL if the macro assembler should - // not use isolate-dependent functionality. In this case, it's the - // responsibility of the caller to never invoke such function on the - // macro assembler. - MacroAssembler(Isolate* isolate, void* buffer, int size); + MacroAssembler(void* buffer, int size); // Jump, Call, and Ret pseudo instructions implementing inter-working. void Jump(Register target, Condition cond = al); - void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); + void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); - int CallSize(Register target, Condition cond = al); void Call(Register target, Condition cond = al); - int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); - void Call(Address target, RelocInfo::Mode rmode, Condition cond = al); - int CallSize(Handle<Code> code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - unsigned ast_id = kNoASTId, - Condition cond = al); - void Call(Handle<Code> code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - unsigned ast_id = kNoASTId, - Condition cond = al); + void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al); + void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Ret(Condition cond = al); // Emit code to discard a non-negative number of pointer-sized elements @@ -140,12 +135,11 @@ class MacroAssembler: public Assembler { Condition cond = al); void Call(Label* target); - - // Register move. May do nothing if the registers are identical. void Move(Register dst, Handle<Object> value); - void Move(Register dst, Register src, Condition cond = al); - void Move(DoubleRegister dst, DoubleRegister src); - + // May do nothing if the registers are identical. + void Move(Register dst, Register src); + // Jumps to the label at the index given by the Smi in "index". + void SmiJumpTable(Register index, Vector<Label*> targets); // Load an object from the root table. void LoadRoot(Register destination, Heap::RootListIndex index, @@ -190,9 +184,6 @@ class MacroAssembler: public Assembler { Register address, Register scratch); - // Push a handle. - void Push(Handle<Object> handle); - // Push two registers. Pushes leftmost register first (to highest address). 
void Push(Register src1, Register src2, Condition cond = al) { ASSERT(!src1.is(src2)); @@ -312,10 +303,6 @@ class MacroAssembler: public Assembler { const Register fpscr_flags, const Condition cond = al); - void Vmov(const DwVfpRegister dst, - const double imm, - const Condition cond = al); - // --------------------------------------------------------------------------- // Activation frames @@ -351,38 +338,29 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // JavaScript invokes - // Setup call kind marking in ecx. The method takes ecx as an - // explicit first parameter to make the code more readable at the - // call sites. - void SetCallKind(Register dst, CallKind kind); - // Invoke the JavaScript function code by either calling or jumping. void InvokeCode(Register code, const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, - const CallWrapper& call_wrapper, - CallKind call_kind); + PostCallGenerator* post_call_generator = NULL); void InvokeCode(Handle<Code> code, const ParameterCount& expected, const ParameterCount& actual, RelocInfo::Mode rmode, - InvokeFlag flag, - CallKind call_kind); + InvokeFlag flag); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. void InvokeFunction(Register function, const ParameterCount& actual, InvokeFlag flag, - const CallWrapper& call_wrapper, - CallKind call_kind); + PostCallGenerator* post_call_generator = NULL); void InvokeFunction(JSFunction* function, const ParameterCount& actual, - InvokeFlag flag, - CallKind call_kind); + InvokeFlag flag); void IsObjectJSObjectType(Register heap_object, Register map, @@ -582,12 +560,6 @@ class MacroAssembler: public Assembler { InstanceType type); - // Check if a map for a JSObject indicates that the object has fast elements. - // Jump to the specified label if it does not. - void CheckFastElements(Register map, - Register scratch, - Label* fail); - // Check if the map of an object is equal to a specified map (either // given directly or as an index into the root list) and branch to // label if not. Skip the smi check if not required (object is known @@ -596,29 +568,13 @@ class MacroAssembler: public Assembler { Register scratch, Handle<Map> map, Label* fail, - SmiCheckType smi_check_type); - + bool is_heap_object); void CheckMap(Register obj, Register scratch, Heap::RootListIndex index, Label* fail, - SmiCheckType smi_check_type); - - - // Check if the map of an object is equal to a specified map and branch to a - // specified target if equal. Skip the smi check if not required (object is - // known to be a heap object) - void DispatchMap(Register obj, - Register scratch, - Handle<Map> map, - Handle<Code> success, - SmiCheckType smi_check_type); - - - // Compare the object in a register to a value from the root list. - // Uses the ip register as scratch. - void CompareRoot(Register obj, Heap::RootListIndex index); + bool is_heap_object); // Load and check the instance type of an object for being a string. @@ -685,11 +641,11 @@ class MacroAssembler: public Assembler { DwVfpRegister double_scratch, Label *not_int32); - // Truncates a double using a specific rounding mode. - // Clears the z flag (ne condition) if an overflow occurs. - // If exact_conversion is true, the z flag is also cleared if the conversion - // was inexact, ie. if the double value could not be converted exactly - // to a 32bit integer. 
+// Truncates a double using a specific rounding mode. +// Clears the z flag (ne condition) if an overflow occurs. +// If exact_conversion is true, the z flag is also cleared if the conversion +// was inexact, ie. if the double value could not be converted exactly +// to a 32bit integer. void EmitVFPTruncate(VFPRoundingMode rounding_mode, SwVfpRegister result, DwVfpRegister double_input, @@ -698,27 +654,6 @@ class MacroAssembler: public Assembler { CheckForInexactConversion check = kDontCheckForInexactConversion); - // Helper for EmitECMATruncate. - // This will truncate a floating-point value outside of the singed 32bit - // integer range to a 32bit signed integer. - // Expects the double value loaded in input_high and input_low. - // Exits with the answer in 'result'. - // Note that this code does not work for values in the 32bit range! - void EmitOutOfInt32RangeTruncate(Register result, - Register input_high, - Register input_low, - Register scratch); - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer and all other registers clobbered. - void EmitECMATruncate(Register result, - DwVfpRegister double_input, - SwVfpRegister single_scratch, - Register scratch, - Register scratch2, - Register scratch3); - // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz // instruction. On pre-ARM5 hardware this routine gives the wrong answer // for 0 (31 instead of 32). Source and scratch can be the same in which case @@ -734,11 +669,6 @@ class MacroAssembler: public Assembler { // Call a code stub. void CallStub(CodeStub* stub, Condition cond = al); - // Call a code stub and return the code object called. Try to generate - // the code if necessary. Do not perform a GC but instead return a retry - // after GC failure. - MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al); - // Call a code stub. void TailCallStub(CodeStub* stub, Condition cond = al); @@ -749,7 +679,7 @@ class MacroAssembler: public Assembler { Condition cond = al); // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments); + void CallRuntime(Runtime::Function* f, int num_arguments); void CallRuntimeSaveDoubles(Runtime::FunctionId id); // Convenience function: Same as above, but takes the fid instead. @@ -777,32 +707,15 @@ class MacroAssembler: public Assembler { int num_arguments, int result_size); - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); - // Before calling a C-function from generated code, align arguments on stack. // After aligning the frame, non-register arguments must be stored in // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments - // are word sized. If double arguments are used, this function assumes that - // all double arguments are stored before core registers; otherwise the - // correct alignment of the double values is not guaranteed. + // are word sized. // Some compilers/platforms require the stack to be aligned when calling // C++ code. // Needs a scratch register to do some arithmetic. This register will be // trashed. - void PrepareCallCFunction(int num_reg_arguments, - int num_double_registers, - Register scratch); - void PrepareCallCFunction(int num_reg_arguments, - Register scratch); - - // There are two ways of passing double arguments on ARM, depending on - // whether soft or hard floating point ABI is used. 
These functions - // abstract parameter passing for the three different ways we call - // C functions from generated code. - void SetCallCDoubleArguments(DoubleRegister dreg); - void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2); - void SetCallCDoubleArguments(DoubleRegister dreg, Register reg); + void PrepareCallCFunction(int num_arguments, Register scratch); // Calls a C function and cleans up the space for arguments allocated // by PrepareCallCFunction. The called function is not allowed to trigger a @@ -810,13 +723,7 @@ class MacroAssembler: public Assembler { // return address (unless this is somehow accounted for by the called // function). void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, Register scratch, int num_arguments); - void CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments); - void CallCFunction(Register function, Register scratch, - int num_reg_arguments, - int num_double_arguments); + void CallCFunction(Register function, int num_arguments); void GetCFunctionDoubleResult(const DoubleRegister dst); @@ -835,8 +742,8 @@ class MacroAssembler: public Assembler { // Invoke specified builtin JavaScript function. Adds an entry to // the unresolved list if the name does not resolve. void InvokeBuiltin(Builtins::JavaScript id, - InvokeFlag flag, - const CallWrapper& call_wrapper = NullCallWrapper()); + InvokeJSFlags flags, + PostCallGenerator* post_call_generator = NULL); // Store the code object for the given builtin in the target register and // setup the function in r1. @@ -845,10 +752,7 @@ class MacroAssembler: public Assembler { // Store the function for the given builtin in the target register. void GetBuiltinFunction(Register target, Builtins::JavaScript id); - Handle<Object> CodeObject() { - ASSERT(!code_object_.is_null()); - return code_object_; - } + Handle<Object> CodeObject() { return code_object_; } // --------------------------------------------------------------------------- @@ -883,15 +787,6 @@ class MacroAssembler: public Assembler { void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } - // EABI variant for double arguments in use. - bool use_eabi_hardfloat() { -#if USE_EABI_HARDFLOAT - return true; -#else - return false; -#endif - } - // --------------------------------------------------------------------------- // Number utilities @@ -902,16 +797,6 @@ class MacroAssembler: public Assembler { void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch, Label* not_power_of_two_or_zero); - // Check whether the value of reg is a power of two and not zero. - // Control falls through if it is, with scratch containing the mask - // value (reg - 1). - // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is - // zero or negative, or jumps to the 'not_power_of_two' label if the value is - // strictly positive but not a power of two. 
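The comment above belongs to JumpIfNotPowerOfTwoOrZeroAndNeg, whose emitted sequence (sub with SetCC followed by tst, removed earlier from macro-assembler-arm.cc) is the classic reg & (reg - 1) test. A plain C++ restatement of the three-way outcome:

#include <cstdint>

enum PowerOfTwoResult { kPowerOfTwo, kZeroOrNegative, kNotPowerOfTwo };

// sub(scratch, reg, 1, SetCC)  -> scratch = reg - 1, flags set on the result
// b(mi, zero_and_neg)          -> reg - 1 negative means reg was zero or negative
// tst(scratch, reg); b(ne,...) -> any shared bit means reg is not a power of two
PowerOfTwoResult Classify(int32_t reg) {
  int32_t scratch = reg - 1;
  if (reg <= 0) return kZeroOrNegative;
  if ((scratch & reg) != 0) return kNotPowerOfTwo;
  return kPowerOfTwo;  // control falls through with scratch holding the mask reg - 1
}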
- void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, - Register scratch, - Label* zero_and_neg, - Label* not_power_of_two); // --------------------------------------------------------------------------- // Smi utilities @@ -1019,23 +904,9 @@ class MacroAssembler: public Assembler { Register result); - void ClampUint8(Register output_reg, Register input_reg); - - void ClampDoubleToUint8(Register result_reg, - DoubleRegister input_reg, - DoubleRegister temp_double_reg); - - - void LoadInstanceDescriptors(Register map, Register descriptors); - private: - void CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, - int num_reg_arguments, - int num_double_arguments); - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); // Helper functions for generating invokes. void InvokePrologue(const ParameterCount& expected, @@ -1044,8 +915,7 @@ class MacroAssembler: public Assembler { Register code_reg, Label* done, InvokeFlag flag, - const CallWrapper& call_wrapper, - CallKind call_kind); + PostCallGenerator* post_call_generator = NULL); // Activation support. void EnterFrame(StackFrame::Type type); @@ -1106,6 +976,17 @@ class CodePatcher { #endif // ENABLE_DEBUGGER_SUPPORT +// Helper class for generating code or data associated with the code +// right after a call instruction. As an example this can be used to +// generate safepoint data after calls for crankshaft. +class PostCallGenerator { + public: + PostCallGenerator() { } + virtual ~PostCallGenerator() { } + virtual void Generate() = 0; +}; + + // ----------------------------------------------------------------------------- // Static helper functions. diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 983a5286e0..1f6ed6712d 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -60,7 +60,6 @@ namespace internal { * Each call to a public method should retain this convention. * * The stack will have the following structure: - * - fp[52] Isolate* isolate (Address of the current isolate) * - fp[48] direct_call (if 1, direct call from JavaScript code, * if 0, call through the runtime system). * - fp[44] stack_area_base (High end of the memory area to use as @@ -116,7 +115,7 @@ namespace internal { RegExpMacroAssemblerARM::RegExpMacroAssemblerARM( Mode mode, int registers_to_save) - : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)), + : masm_(new MacroAssembler(NULL, kRegExpCodeSize)), mode_(mode), num_registers_(registers_to_save), num_saved_registers_(registers_to_save), @@ -347,7 +346,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( __ sub(current_input_offset(), r2, end_of_input_address()); } else { ASSERT(mode_ == UC16); - int argument_count = 4; + int argument_count = 3; __ PrepareCallCFunction(argument_count, r2); // r0 - offset of start of capture @@ -358,7 +357,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // r0: Address byte_offset1 - Address captured substring's start. // r1: Address byte_offset2 - Address of current character position. // r2: size_t byte_length - length of capture in bytes(!) - // r3: Isolate* isolate // Address of start of capture. 
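The PostCallGenerator hook restored in macro-assembler-arm.h above is only an abstract interface; per the header comment it exists so that, for example, safepoint data can be recorded right after a call for Crankshaft. A hypothetical subclass, sketched only to show how the hook plugs into InvokeCode/InvokeBuiltin (the subclass name and behaviour are invented for illustration):

// Repeated from the header above so the sketch is self-contained.
class PostCallGenerator {
 public:
  PostCallGenerator() { }
  virtual ~PostCallGenerator() { }
  virtual void Generate() = 0;
};

// Hypothetical example: count how many calls were followed by the hook. The
// macro assembler invokes Generate() immediately after emitting the call,
// which is where safepoint data would be recorded in real code.
class CountingPostCallGenerator : public PostCallGenerator {
 public:
  CountingPostCallGenerator() : calls_seen_(0) { }
  virtual void Generate() { calls_seen_++; }
  int calls_seen() const { return calls_seen_; }
 private:
  int calls_seen_;
};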
__ add(r0, r0, Operand(end_of_input_address())); @@ -368,11 +366,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( __ mov(r4, Operand(r1)); // Address of current input position. __ add(r1, current_input_offset(), Operand(end_of_input_address())); - // Isolate. - __ mov(r3, Operand(ExternalReference::isolate_address())); ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + ExternalReference::re_case_insensitive_compare_uc16(); __ CallCFunction(function, argument_count); // Check if function returned non-zero for success or zero for failure. @@ -605,7 +601,7 @@ void RegExpMacroAssemblerARM::Fail() { } -Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { +Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Finalize code - write the entry point code now we know how many // registers we need. @@ -630,7 +626,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { Label stack_ok; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ sub(r0, sp, r0, SetCC); @@ -781,13 +777,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { Label grow_failed; // Call GrowStack(backtrack_stackpointer(), &stack_base) - static const int num_arguments = 3; + static const int num_arguments = 2; __ PrepareCallCFunction(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); __ add(r1, frame_pointer(), Operand(kStackHighEnd)); - __ mov(r2, Operand(ExternalReference::isolate_address())); ExternalReference grow_stack = - ExternalReference::re_grow_stack(masm_->isolate()); + ExternalReference::re_grow_stack(); __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. @@ -809,11 +804,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { CodeDesc code_desc; masm_->GetCode(&code_desc); - Handle<Code> code = FACTORY->NewCode(code_desc, + Handle<Code> code = Factory::NewCode(code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); - PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source)); - return Handle<HeapObject>::cast(code); + PROFILE(RegExpCodeCreateEvent(*code, *source)); + return Handle<Object>::cast(code); } @@ -899,12 +894,13 @@ void RegExpMacroAssemblerARM::PushBacktrack(Label* label) { constant_offset - offset_of_pc_register_read; ASSERT(pc_offset_of_constant < 0); if (is_valid_memory_offset(pc_offset_of_constant)) { - Assembler::BlockConstPoolScope block_const_pool(masm_); + masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize); __ ldr(r0, MemOperand(pc, pc_offset_of_constant)); } else { // Not a 12-bit offset, so it needs to be loaded from the constant // pool. - Assembler::BlockConstPoolScope block_const_pool(masm_); + masm_->BlockConstPoolBefore( + masm_->pc_offset() + 2 * Assembler::kInstrSize); __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize)); __ ldr(r0, MemOperand(pc, r0)); } @@ -1002,7 +998,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { __ mov(r1, Operand(masm_->CodeObject())); // r0 becomes return address pointer. 
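The C call in the hunk above hands three arguments (the 3.1.8 form, without the isolate) to re_case_insensitive_compare_uc16: the captured substring's start, the current position, and the capture length in bytes, with a non-zero return meaning the ranges match ignoring case. A rough model of that contract (std::towlower stands in for the real unibrow canonicalisation):

#include <cstddef>
#include <cstdint>
#include <cwctype>

int CaseInsensitiveCompareUC16(const uint16_t* substring_start,
                               const uint16_t* current_position,
                               size_t byte_length) {
  size_t length = byte_length / sizeof(uint16_t);  // "length of capture in bytes(!)"
  for (size_t i = 0; i < length; i++) {
    if (std::towlower(substring_start[i]) != std::towlower(current_position[i])) {
      return 0;  // zero: mismatch, the caller branches to on_no_match
    }
  }
  return 1;      // non-zero: the back reference matches
}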
ExternalReference stack_guard_check = - ExternalReference::re_check_stack_guard_state(masm_->isolate()); + ExternalReference::re_check_stack_guard_state(); CallCFunctionUsingStub(stack_guard_check, num_arguments); } @@ -1017,10 +1013,8 @@ static T& frame_entry(Address re_frame, int frame_offset) { int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, Code* re_code, Address re_frame) { - Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate); - ASSERT(isolate == Isolate::Current()); - if (isolate->stack_guard()->IsStackOverflow()) { - isolate->StackOverflow(); + if (StackGuard::IsStackOverflow()) { + Top::StackOverflow(); return EXCEPTION; } @@ -1164,7 +1158,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) { void RegExpMacroAssemblerARM::CheckPreemption() { // Check for preemption. ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(sp, r0); @@ -1174,7 +1168,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() { void RegExpMacroAssemblerARM::CheckStackLimit() { ExternalReference stack_limit = - ExternalReference::address_of_regexp_stack_limit(masm_->isolate()); + ExternalReference::address_of_regexp_stack_limit(); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(backtrack_stackpointer(), Operand(r0)); @@ -1184,7 +1178,8 @@ void RegExpMacroAssemblerARM::CheckStackLimit() { void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() { __ CheckConstPool(false, false); - Assembler::BlockConstPoolScope block_const_pool(masm_); + __ BlockConstPoolBefore( + masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize); backtrack_constant_pool_offset_ = masm_->pc_offset(); for (int i = 0; i < kBacktrackConstantPoolSize; i++) { __ emit(0); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index d771e4033f..d9d0b3562e 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -82,7 +82,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match); virtual void Fail(); - virtual Handle<HeapObject> GetCode(Handle<String> source); + virtual Handle<Object> GetCode(Handle<String> source); virtual void GoTo(Label* label); virtual void IfRegisterGE(int reg, int comparand, Label* if_ge); virtual void IfRegisterLT(int reg, int comparand, Label* if_lt); @@ -127,7 +127,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize; static const int kStackHighEnd = kRegisterOutput + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; - static const int kIsolate = kDirectCall + kPointerSize; // Below the frame pointer. // Register parameters stored by setup code. diff --git a/deps/v8/src/arm/register-allocator-arm-inl.h b/deps/v8/src/arm/register-allocator-arm-inl.h new file mode 100644 index 0000000000..945cdeb3cc --- /dev/null +++ b/deps/v8/src/arm/register-allocator-arm-inl.h @@ -0,0 +1,100 @@ +// Copyright 2009 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_ +#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_ + +#include "v8.h" + +namespace v8 { +namespace internal { + +// ------------------------------------------------------------------------- +// RegisterAllocator implementation. + +bool RegisterAllocator::IsReserved(Register reg) { + return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc); +} + + + +// The register allocator uses small integers to represent the +// non-reserved assembler registers. The mapping is: +// +// r0 <-> 0 +// r1 <-> 1 +// r2 <-> 2 +// r3 <-> 3 +// r4 <-> 4 +// r5 <-> 5 +// r6 <-> 6 +// r7 <-> 7 +// r9 <-> 8 +// r10 <-> 9 +// ip <-> 10 +// lr <-> 11 + +int RegisterAllocator::ToNumber(Register reg) { + ASSERT(reg.is_valid() && !IsReserved(reg)); + const int kNumbers[] = { + 0, // r0 + 1, // r1 + 2, // r2 + 3, // r3 + 4, // r4 + 5, // r5 + 6, // r6 + 7, // r7 + -1, // cp + 8, // r9 + 9, // r10 + -1, // fp + 10, // ip + -1, // sp + 11, // lr + -1 // pc + }; + return kNumbers[reg.code()]; +} + + +Register RegisterAllocator::ToRegister(int num) { + ASSERT(num >= 0 && num < kNumRegisters); + const Register kRegisters[] = + { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr }; + return kRegisters[num]; +} + + +void RegisterAllocator::Initialize() { + Reset(); +} + + +} } // namespace v8::internal + +#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_ diff --git a/deps/v8/src/arm/register-allocator-arm.cc b/deps/v8/src/arm/register-allocator-arm.cc new file mode 100644 index 0000000000..3b35574da3 --- /dev/null +++ b/deps/v8/src/arm/register-allocator-arm.cc @@ -0,0 +1,63 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
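register-allocator-arm-inl.h above sets up a dense numbering for the twelve allocatable ARM registers, skipping the reserved cp, fp, sp and pc. A standalone check of the round-trip property implied by its two tables (register codes follow the usual ARM order, with cp being r8 as declared in macro-assembler-arm.h):

#include <cassert>

int main() {
  // Index = register code r0..pc, value = allocator number or -1 if reserved.
  const int kToNumber[16] = { 0, 1, 2, 3, 4, 5, 6, 7,
                              -1 /* cp */, 8 /* r9 */, 9 /* r10 */,
                              -1 /* fp */, 10 /* ip */, -1 /* sp */,
                              11 /* lr */, -1 /* pc */ };
  // Index = allocator number, value = register code (r0..r7, r9, r10, ip, lr).
  const int kToCode[12] = { 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 12, 14 };
  for (int num = 0; num < 12; num++) {
    assert(kToNumber[kToCode[num]] == num);  // ToNumber(ToRegister(num)) == num
  }
  return 0;
}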
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_ARM) + +#include "codegen-inl.h" +#include "register-allocator-inl.h" + +namespace v8 { +namespace internal { + +// ------------------------------------------------------------------------- +// Result implementation. + +void Result::ToRegister() { + UNIMPLEMENTED(); +} + + +void Result::ToRegister(Register target) { + UNIMPLEMENTED(); +} + + +// ------------------------------------------------------------------------- +// RegisterAllocator implementation. + +Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() { + // No byte registers on ARM. + UNREACHABLE(); + return Result(); +} + + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/register-allocator-arm.h b/deps/v8/src/arm/register-allocator-arm.h new file mode 100644 index 0000000000..fdbc88f5dc --- /dev/null +++ b/deps/v8/src/arm/register-allocator-arm.h @@ -0,0 +1,44 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_ +#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_ + +namespace v8 { +namespace internal { + +class RegisterAllocatorConstants : public AllStatic { + public: + // No registers are currently managed by the register allocator on ARM. + static const int kNumRegisters = 0; + static const int kInvalidRegister = -1; +}; + + +} } // namespace v8::internal + +#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_ diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 6af535553f..f475a18b09 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -49,12 +49,12 @@ namespace internal { // Windows C Run-Time Library does not provide vsscanf. #define SScanF sscanf // NOLINT -// The ArmDebugger class is used by the simulator while debugging simulated ARM +// The Debugger class is used by the simulator while debugging simulated ARM // code. -class ArmDebugger { +class Debugger { public: - explicit ArmDebugger(Simulator* sim); - ~ArmDebugger(); + explicit Debugger(Simulator* sim); + ~Debugger(); void Stop(Instruction* instr); void Debug(); @@ -67,7 +67,6 @@ class ArmDebugger { Simulator* sim_; int32_t GetRegisterValue(int regnum); - double GetRegisterPairDoubleValue(int regnum); double GetVFPDoubleRegisterValue(int regnum); bool GetValue(const char* desc, int32_t* value); bool GetVFPSingleValue(const char* desc, float* value); @@ -84,12 +83,12 @@ class ArmDebugger { }; -ArmDebugger::ArmDebugger(Simulator* sim) { +Debugger::Debugger(Simulator* sim) { sim_ = sim; } -ArmDebugger::~ArmDebugger() { +Debugger::~Debugger() { } @@ -106,7 +105,7 @@ static void InitializeCoverage() { } -void ArmDebugger::Stop(Instruction* instr) { +void Debugger::Stop(Instruction* instr) { // Get the stop code. uint32_t code = instr->SvcValue() & kStopCodeMask; // Retrieve the encoded address, which comes just after this stop. @@ -138,7 +137,7 @@ static void InitializeCoverage() { } -void ArmDebugger::Stop(Instruction* instr) { +void Debugger::Stop(Instruction* instr) { // Get the stop code. uint32_t code = instr->SvcValue() & kStopCodeMask; // Retrieve the encoded address, which comes just after this stop. 
@@ -160,7 +159,7 @@ void ArmDebugger::Stop(Instruction* instr) { #endif -int32_t ArmDebugger::GetRegisterValue(int regnum) { +int32_t Debugger::GetRegisterValue(int regnum) { if (regnum == kPCRegister) { return sim_->get_pc(); } else { @@ -169,17 +168,12 @@ int32_t ArmDebugger::GetRegisterValue(int regnum) { } -double ArmDebugger::GetRegisterPairDoubleValue(int regnum) { - return sim_->get_double_from_register_pair(regnum); -} - - -double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) { +double Debugger::GetVFPDoubleRegisterValue(int regnum) { return sim_->get_double_from_d_register(regnum); } -bool ArmDebugger::GetValue(const char* desc, int32_t* value) { +bool Debugger::GetValue(const char* desc, int32_t* value) { int regnum = Registers::Number(desc); if (regnum != kNoRegister) { *value = GetRegisterValue(regnum); @@ -195,7 +189,7 @@ bool ArmDebugger::GetValue(const char* desc, int32_t* value) { } -bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) { +bool Debugger::GetVFPSingleValue(const char* desc, float* value) { bool is_double; int regnum = VFPRegisters::Number(desc, &is_double); if (regnum != kNoRegister && !is_double) { @@ -206,7 +200,7 @@ bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) { } -bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) { +bool Debugger::GetVFPDoubleValue(const char* desc, double* value) { bool is_double; int regnum = VFPRegisters::Number(desc, &is_double); if (regnum != kNoRegister && is_double) { @@ -217,7 +211,7 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) { } -bool ArmDebugger::SetBreakpoint(Instruction* breakpc) { +bool Debugger::SetBreakpoint(Instruction* breakpc) { // Check if a breakpoint can be set. If not return without any side-effects. if (sim_->break_pc_ != NULL) { return false; @@ -232,7 +226,7 @@ bool ArmDebugger::SetBreakpoint(Instruction* breakpc) { } -bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) { +bool Debugger::DeleteBreakpoint(Instruction* breakpc) { if (sim_->break_pc_ != NULL) { sim_->break_pc_->SetInstructionBits(sim_->break_instr_); } @@ -243,21 +237,21 @@ bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) { } -void ArmDebugger::UndoBreakpoints() { +void Debugger::UndoBreakpoints() { if (sim_->break_pc_ != NULL) { sim_->break_pc_->SetInstructionBits(sim_->break_instr_); } } -void ArmDebugger::RedoBreakpoints() { +void Debugger::RedoBreakpoints() { if (sim_->break_pc_ != NULL) { sim_->break_pc_->SetInstructionBits(kBreakpointInstr); } } -void ArmDebugger::Debug() { +void Debugger::Debug() { intptr_t last_pc = -1; bool done = false; @@ -311,45 +305,27 @@ void ArmDebugger::Debug() { // Leave the debugger shell. 
done = true; } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { - if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) { + if (argc == 2) { int32_t value; float svalue; double dvalue; if (strcmp(arg1, "all") == 0) { for (int i = 0; i < kNumRegisters; i++) { value = GetRegisterValue(i); - PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value); - if ((argc == 3 && strcmp(arg2, "fp") == 0) && - i < 8 && - (i % 2) == 0) { - dvalue = GetRegisterPairDoubleValue(i); - PrintF(" (%f)\n", dvalue); - } else { - PrintF("\n"); - } + PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value); } for (int i = 0; i < kNumVFPDoubleRegisters; i++) { dvalue = GetVFPDoubleRegisterValue(i); - uint64_t as_words = BitCast<uint64_t>(dvalue); - PrintF("%3s: %f 0x%08x %08x\n", - VFPRegisters::Name(i, true), - dvalue, - static_cast<uint32_t>(as_words >> 32), - static_cast<uint32_t>(as_words & 0xffffffff)); + PrintF("%3s: %f\n", + VFPRegisters::Name(i, true), dvalue); } } else { if (GetValue(arg1, &value)) { PrintF("%s: 0x%08x %d \n", arg1, value, value); } else if (GetVFPSingleValue(arg1, &svalue)) { - uint32_t as_word = BitCast<uint32_t>(svalue); - PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word); + PrintF("%s: %f \n", arg1, svalue); } else if (GetVFPDoubleValue(arg1, &dvalue)) { - uint64_t as_words = BitCast<uint64_t>(dvalue); - PrintF("%s: %f 0x%08x %08x\n", - arg1, - dvalue, - static_cast<uint32_t>(as_words >> 32), - static_cast<uint32_t>(as_words & 0xffffffff)); + PrintF("%s: %f \n", arg1, dvalue); } else { PrintF("%s unrecognized\n", arg1); } @@ -404,24 +380,11 @@ void ArmDebugger::Debug() { end = cur + words; while (cur < end) { - PrintF(" 0x%08x: 0x%08x %10d", + PrintF(" 0x%08x: 0x%08x %10d\n", reinterpret_cast<intptr_t>(cur), *cur, *cur); - HeapObject* obj = reinterpret_cast<HeapObject*>(*cur); - int value = *cur; - Heap* current_heap = v8::internal::Isolate::Current()->heap(); - if (current_heap->Contains(obj) || ((value & 1) == 0)) { - PrintF(" ("); - if ((value & 1) == 0) { - PrintF("smi %d", value / 2); - } else { - obj->ShortPrint(); - } - PrintF(")"); - } - PrintF("\n"); cur++; } - } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) { + } else if (strcmp(cmd, "disasm") == 0) { disasm::NameConverter converter; disasm::Disassembler dasm(converter); // use a reasonably large buffer @@ -435,23 +398,11 @@ void ArmDebugger::Debug() { cur = reinterpret_cast<byte*>(sim_->get_pc()); end = cur + (10 * Instruction::kInstrSize); } else if (argc == 2) { - int regnum = Registers::Number(arg1); - if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) { - // The argument is an address or a register name. - int32_t value; - if (GetValue(arg1, &value)) { - cur = reinterpret_cast<byte*>(value); - // Disassemble 10 instructions at <arg1>. - end = cur + (10 * Instruction::kInstrSize); - } - } else { - // The argument is the number of instructions. - int32_t value; - if (GetValue(arg1, &value)) { - cur = reinterpret_cast<byte*>(sim_->get_pc()); - // Disassemble <arg1> instructions. - end = cur + (value * Instruction::kInstrSize); - } + int32_t value; + if (GetValue(arg1, &value)) { + cur = reinterpret_cast<byte*>(sim_->get_pc()); + // Disassemble <arg1> instructions. 
+ end = cur + (value * Instruction::kInstrSize); } } else { int32_t value1; @@ -564,7 +515,6 @@ void ArmDebugger::Debug() { PrintF("print <register>\n"); PrintF(" print register content (alias 'p')\n"); PrintF(" use register name 'all' to print all registers\n"); - PrintF(" add argument 'fp' to print register pair double values\n"); PrintF("printobject <register>\n"); PrintF(" print an object from a register (alias 'po')\n"); PrintF("flags\n"); @@ -574,10 +524,8 @@ void ArmDebugger::Debug() { PrintF("mem <address> [<words>]\n"); PrintF(" dump memory content, default dump 10 words)\n"); PrintF("disasm [<instructions>]\n"); - PrintF("disasm [<address/register>]\n"); - PrintF("disasm [[<address/register>] <instructions>]\n"); - PrintF(" disassemble code, default is 10 instructions\n"); - PrintF(" from pc (alias 'di')\n"); + PrintF("disasm [[<address>] <instructions>]\n"); + PrintF(" disassemble code, default is 10 instructions from pc\n"); PrintF("gdb\n"); PrintF(" enter gdb\n"); PrintF("break <address>\n"); @@ -591,11 +539,11 @@ void ArmDebugger::Debug() { PrintF(" Stops are debug instructions inserted by\n"); PrintF(" the Assembler::stop() function.\n"); PrintF(" When hitting a stop, the Simulator will\n"); - PrintF(" stop and and give control to the ArmDebugger.\n"); + PrintF(" stop and and give control to the Debugger.\n"); PrintF(" The first %d stop codes are watched:\n", Simulator::kNumOfWatchedStops); PrintF(" - They can be enabled / disabled: the Simulator\n"); - PrintF(" will / won't stop when hitting them.\n"); + PrintF(" will / won't stop when hitting them.\n"); PrintF(" - The Simulator keeps track of how many times they \n"); PrintF(" are met. (See the info command.) Going over a\n"); PrintF(" disabled stop still increases its counter. \n"); @@ -645,9 +593,7 @@ static bool AllOnOnePage(uintptr_t start, int size) { } -void Simulator::FlushICache(v8::internal::HashMap* i_cache, - void* start_addr, - size_t size) { +void Simulator::FlushICache(void* start_addr, size_t size) { intptr_t start = reinterpret_cast<intptr_t>(start_addr); int intra_line = (start & CachePage::kLineMask); start -= intra_line; @@ -656,22 +602,22 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache, int offset = (start & CachePage::kPageMask); while (!AllOnOnePage(start, size - 1)) { int bytes_to_flush = CachePage::kPageSize - offset; - FlushOnePage(i_cache, start, bytes_to_flush); + FlushOnePage(start, bytes_to_flush); start += bytes_to_flush; size -= bytes_to_flush; ASSERT_EQ(0, start & CachePage::kPageMask); offset = 0; } if (size != 0) { - FlushOnePage(i_cache, start, size); + FlushOnePage(start, size); } } -CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) { - v8::internal::HashMap::Entry* entry = i_cache->Lookup(page, - ICacheHash(page), - true); +CachePage* Simulator::GetCachePage(void* page) { + v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page, + ICacheHash(page), + true); if (entry->value == NULL) { CachePage* new_page = new CachePage(); entry->value = new_page; @@ -681,28 +627,25 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) { // Flush from start up to and not including start + size. 
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, - intptr_t start, - int size) { +void Simulator::FlushOnePage(intptr_t start, int size) { ASSERT(size <= CachePage::kPageSize); ASSERT(AllOnOnePage(start, size - 1)); ASSERT((start & CachePage::kLineMask) == 0); ASSERT((size & CachePage::kLineMask) == 0); void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask)); int offset = (start & CachePage::kPageMask); - CachePage* cache_page = GetCachePage(i_cache, page); + CachePage* cache_page = GetCachePage(page); char* valid_bytemap = cache_page->ValidityByte(offset); memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift); } -void Simulator::CheckICache(v8::internal::HashMap* i_cache, - Instruction* instr) { +void Simulator::CheckICache(Instruction* instr) { intptr_t address = reinterpret_cast<intptr_t>(instr); void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask)); void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask)); int offset = (address & CachePage::kPageMask); - CachePage* cache_page = GetCachePage(i_cache, page); + CachePage* cache_page = GetCachePage(page); char* cache_valid_byte = cache_page->ValidityByte(offset); bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID); char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); @@ -719,21 +662,29 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache, } -void Simulator::Initialize(Isolate* isolate) { - if (isolate->simulator_initialized()) return; - isolate->set_simulator_initialized(true); - ::v8::internal::ExternalReference::set_redirector(isolate, - &RedirectExternalReference); +// Create one simulator per thread and keep it in thread local storage. +static v8::internal::Thread::LocalStorageKey simulator_key; + + +bool Simulator::initialized_ = false; + + +void Simulator::Initialize() { + if (initialized_) return; + simulator_key = v8::internal::Thread::CreateThreadLocalKey(); + initialized_ = true; + ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference); } -Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { - i_cache_ = isolate_->simulator_i_cache(); +v8::internal::HashMap* Simulator::i_cache_ = NULL; + + +Simulator::Simulator() { if (i_cache_ == NULL) { i_cache_ = new v8::internal::HashMap(&ICacheMatch); - isolate_->set_simulator_i_cache(i_cache_); } - Initialize(isolate); + Initialize(); // Setup simulator support first. Some of this information is needed to // setup the architecture state. 
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack @@ -797,14 +748,11 @@ class Redirection { : external_function_(external_function), swi_instruction_(al | (0xf*B24) | kCallRtRedirected), type_(type), - next_(NULL) { - Isolate* isolate = Isolate::Current(); - next_ = isolate->simulator_redirection(); - Simulator::current(isolate)-> - FlushICache(isolate->simulator_i_cache(), - reinterpret_cast<void*>(&swi_instruction_), - Instruction::kInstrSize); - isolate->set_simulator_redirection(this); + next_(list_) { + Simulator::current()-> + FlushICache(reinterpret_cast<void*>(&swi_instruction_), + Instruction::kInstrSize); + list_ = this; } void* address_of_swi_instruction() { @@ -816,9 +764,8 @@ class Redirection { static Redirection* Get(void* external_function, ExternalReference::Type type) { - Isolate* isolate = Isolate::Current(); - Redirection* current = isolate->simulator_redirection(); - for (; current != NULL; current = current->next_) { + Redirection* current; + for (current = list_; current != NULL; current = current->next_) { if (current->external_function_ == external_function) return current; } return new Redirection(external_function, type); @@ -836,9 +783,13 @@ class Redirection { uint32_t swi_instruction_; ExternalReference::Type type_; Redirection* next_; + static Redirection* list_; }; +Redirection* Redirection::list_ = NULL; + + void* Simulator::RedirectExternalReference(void* external_function, ExternalReference::Type type) { Redirection* redirection = Redirection::Get(external_function, type); @@ -847,16 +798,14 @@ void* Simulator::RedirectExternalReference(void* external_function, // Get the active Simulator for the current thread. -Simulator* Simulator::current(Isolate* isolate) { - v8::internal::Isolate::PerIsolateThreadData* isolate_data = - isolate->FindOrAllocatePerThreadDataForThisThread(); - ASSERT(isolate_data != NULL); - - Simulator* sim = isolate_data->simulator(); +Simulator* Simulator::current() { + Initialize(); + Simulator* sim = reinterpret_cast<Simulator*>( + v8::internal::Thread::GetThreadLocal(simulator_key)); if (sim == NULL) { - // TODO(146): delete the simulator object when a thread/isolate goes away. - sim = new Simulator(isolate); - isolate_data->set_simulator(sim); + // TODO(146): delete the simulator object when a thread goes away. + sim = new Simulator(); + v8::internal::Thread::SetThreadLocal(simulator_key, sim); } return sim; } @@ -885,19 +834,6 @@ int32_t Simulator::get_register(int reg) const { } -double Simulator::get_double_from_register_pair(int reg) { - ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0)); - - double dm_val = 0.0; - // Read the bits from the unsigned integer register_[] array - // into the double precision floating point value and return it. - char buffer[2 * sizeof(vfp_register[0])]; - memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); - memcpy(&dm_val, buffer, 2 * sizeof(registers_[0])); - return(dm_val); -} - - void Simulator::set_dw_register(int dreg, const int* dbl) { ASSERT((dreg >= 0) && (dreg < num_d_registers)); registers_[dreg] = dbl[0]; @@ -963,7 +899,12 @@ void Simulator::set_d_register_from_double(int dreg, const double& dbl) { // 2*sreg and 2*sreg+1. 
char buffer[2 * sizeof(vfp_register[0])]; memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0])); +#ifndef BIG_ENDIAN_FLOATING_POINT memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0])); +#else + memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0])); + memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0])); +#endif } @@ -1000,80 +941,37 @@ double Simulator::get_double_from_d_register(int dreg) { // Read the bits from the unsigned integer vfp_register[] array // into the double precision floating point value and return it. char buffer[2 * sizeof(vfp_register[0])]; +#ifdef BIG_ENDIAN_FLOATING_POINT + memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0])); + memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0])); +#else memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0])); +#endif memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0])); return(dm_val); } -// For use in calls that take two double values, constructed either -// from r0-r3 or d0 and d1. +// For use in calls that take two double values, constructed from r0, r1, r2 +// and r3. void Simulator::GetFpArgs(double* x, double* y) { - if (use_eabi_hardfloat()) { - *x = vfp_register[0]; - *y = vfp_register[1]; - } else { - // We use a char buffer to get around the strict-aliasing rules which - // otherwise allow the compiler to optimize away the copy. - char buffer[sizeof(*x)]; - // Registers 0 and 1 -> x. - memcpy(buffer, registers_, sizeof(*x)); - memcpy(x, buffer, sizeof(*x)); - // Registers 2 and 3 -> y. - memcpy(buffer, registers_ + 2, sizeof(*y)); - memcpy(y, buffer, sizeof(*y)); - } -} - -// For use in calls that take one double value, constructed either -// from r0 and r1 or d0. -void Simulator::GetFpArgs(double* x) { - if (use_eabi_hardfloat()) { - *x = vfp_register[0]; - } else { - // We use a char buffer to get around the strict-aliasing rules which - // otherwise allow the compiler to optimize away the copy. - char buffer[sizeof(*x)]; - // Registers 0 and 1 -> x. - memcpy(buffer, registers_, sizeof(*x)); - memcpy(x, buffer, sizeof(*x)); - } -} - - -// For use in calls that take one double value constructed either -// from r0 and r1 or d0 and one integer value. -void Simulator::GetFpArgs(double* x, int32_t* y) { - if (use_eabi_hardfloat()) { - *x = vfp_register[0]; - *y = registers_[1]; - } else { - // We use a char buffer to get around the strict-aliasing rules which - // otherwise allow the compiler to optimize away the copy. - char buffer[sizeof(*x)]; - // Registers 0 and 1 -> x. - memcpy(buffer, registers_, sizeof(*x)); - memcpy(x, buffer, sizeof(*x)); - // Register 2 -> y. - memcpy(buffer, registers_ + 2, sizeof(*y)); - memcpy(y, buffer, sizeof(*y)); - } + // We use a char buffer to get around the strict-aliasing rules which + // otherwise allow the compiler to optimize away the copy. + char buffer[2 * sizeof(registers_[0])]; + // Registers 0 and 1 -> x. + memcpy(buffer, registers_, sizeof(buffer)); + memcpy(x, buffer, sizeof(buffer)); + // Registers 2 and 3 -> y. + memcpy(buffer, registers_ + 2, sizeof(buffer)); + memcpy(y, buffer, sizeof(buffer)); } -// The return value is either in r0/r1 or d0. void Simulator::SetFpResult(const double& result) { - if (use_eabi_hardfloat()) { - char buffer[2 * sizeof(vfp_register[0])]; - memcpy(buffer, &result, sizeof(buffer)); - // Copy result to d0. 
- memcpy(vfp_register, buffer, sizeof(buffer)); - } else { - char buffer[2 * sizeof(registers_[0])]; - memcpy(buffer, &result, sizeof(buffer)); - // Copy result to r0 and r1. - memcpy(registers_, buffer, sizeof(buffer)); - } + char buffer[2 * sizeof(registers_[0])]; + memcpy(buffer, &result, sizeof(buffer)); + // result -> registers 0 and 1. + memcpy(registers_, buffer, sizeof(buffer)); } @@ -1327,13 +1225,12 @@ void Simulator::SetVFlag(bool val) { // Calculate C flag value for additions. -bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) { +bool Simulator::CarryFrom(int32_t left, int32_t right) { uint32_t uleft = static_cast<uint32_t>(left); uint32_t uright = static_cast<uint32_t>(right); uint32_t urest = 0xffffffffU - uleft; - return (uright > urest) || - (carry && (((uright + 1) > urest) || (uright > (urest - 1)))); + return (uright > urest); } @@ -1568,34 +1465,36 @@ static int count_bits(int bit_vector) { } -void Simulator::ProcessPUW(Instruction* instr, - int num_regs, - int reg_size, - intptr_t* start_address, - intptr_t* end_address) { +// Addressing Mode 4 - Load and Store Multiple +void Simulator::HandleRList(Instruction* instr, bool load) { int rn = instr->RnValue(); int32_t rn_val = get_register(rn); + int rlist = instr->RlistValue(); + int num_regs = count_bits(rlist); + + intptr_t start_address = 0; + intptr_t end_address = 0; switch (instr->PUField()) { case da_x: { UNIMPLEMENTED(); break; } case ia_x: { - *start_address = rn_val; - *end_address = rn_val + (num_regs * reg_size) - reg_size; - rn_val = rn_val + (num_regs * reg_size); + start_address = rn_val; + end_address = rn_val + (num_regs * 4) - 4; + rn_val = rn_val + (num_regs * 4); break; } case db_x: { - *start_address = rn_val - (num_regs * reg_size); - *end_address = rn_val - reg_size; - rn_val = *start_address; + start_address = rn_val - (num_regs * 4); + end_address = rn_val - 4; + rn_val = start_address; break; } case ib_x: { - *start_address = rn_val + reg_size; - *end_address = rn_val + (num_regs * reg_size); - rn_val = *end_address; + start_address = rn_val + 4; + end_address = rn_val + (num_regs * 4); + rn_val = end_address; break; } default: { @@ -1606,17 +1505,6 @@ void Simulator::ProcessPUW(Instruction* instr, if (instr->HasW()) { set_register(rn, rn_val); } -} - -// Addressing Mode 4 - Load and Store Multiple -void Simulator::HandleRList(Instruction* instr, bool load) { - int rlist = instr->RlistValue(); - int num_regs = count_bits(rlist); - - intptr_t start_address = 0; - intptr_t end_address = 0; - ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address); - intptr_t* address = reinterpret_cast<intptr_t*>(start_address); int reg = 0; while (rlist != 0) { @@ -1635,57 +1523,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) { } -// Addressing Mode 6 - Load and Store Multiple Coprocessor registers. -void Simulator::HandleVList(Instruction* instr) { - VFPRegPrecision precision = - (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision; - int operand_size = (precision == kSinglePrecision) ? 
4 : 8; - - bool load = (instr->VLValue() == 0x1); - - int vd; - int num_regs; - vd = instr->VFPDRegValue(precision); - if (precision == kSinglePrecision) { - num_regs = instr->Immed8Value(); - } else { - num_regs = instr->Immed8Value() / 2; - } - - intptr_t start_address = 0; - intptr_t end_address = 0; - ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address); - - intptr_t* address = reinterpret_cast<intptr_t*>(start_address); - for (int reg = vd; reg < vd + num_regs; reg++) { - if (precision == kSinglePrecision) { - if (load) { - set_s_register_from_sinteger( - reg, ReadW(reinterpret_cast<int32_t>(address), instr)); - } else { - WriteW(reinterpret_cast<int32_t>(address), - get_sinteger_from_s_register(reg), instr); - } - address += 1; - } else { - if (load) { - set_s_register_from_sinteger( - 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr)); - set_s_register_from_sinteger( - 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr)); - } else { - WriteW(reinterpret_cast<int32_t>(address), - get_sinteger_from_s_register(2 * reg), instr); - WriteW(reinterpret_cast<int32_t>(address + 1), - get_sinteger_from_s_register(2 * reg + 1), instr); - } - address += 2; - } - } - ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address); -} - - // Calls into the V8 runtime are based on this very simple interface. // Note: To be able to return two values from some calls the code in runtime.cc // uses the ObjectPair which is essentially two 32-bit values stuffed into a @@ -1696,8 +1533,7 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3, - int32_t arg4, - int32_t arg5); + int32_t arg4); typedef double (*SimulatorRuntimeFPCall)(int32_t arg0, int32_t arg1, int32_t arg2, @@ -1728,94 +1564,28 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { int32_t arg2 = get_register(r2); int32_t arg3 = get_register(r3); int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp)); - int32_t arg4 = stack_pointer[0]; - int32_t arg5 = stack_pointer[1]; - bool fp_call = - (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) || - (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) || - (redirection->type() == ExternalReference::BUILTIN_FP_CALL) || - (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL); - if (use_eabi_hardfloat()) { - // With the hard floating point calling convention, double - // arguments are passed in VFP registers. Fetch the arguments - // from there and call the builtin using soft floating point - // convention. - switch (redirection->type()) { - case ExternalReference::BUILTIN_FP_FP_CALL: - case ExternalReference::BUILTIN_COMPARE_CALL: - arg0 = vfp_register[0]; - arg1 = vfp_register[1]; - arg2 = vfp_register[2]; - arg3 = vfp_register[3]; - break; - case ExternalReference::BUILTIN_FP_CALL: - arg0 = vfp_register[0]; - arg1 = vfp_register[1]; - break; - case ExternalReference::BUILTIN_FP_INT_CALL: - arg0 = vfp_register[0]; - arg1 = vfp_register[1]; - arg2 = get_register(0); - break; - default: - break; - } - } + int32_t arg4 = *stack_pointer; // This is dodgy but it works because the C entry stubs are never moved. // See comment in codegen-arm.cc and bug 1242173. 
int32_t saved_lr = get_register(lr); intptr_t external = reinterpret_cast<intptr_t>(redirection->external_function()); - if (fp_call) { + if (redirection->type() == ExternalReference::FP_RETURN_CALL) { + SimulatorRuntimeFPCall target = + reinterpret_cast<SimulatorRuntimeFPCall>(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { - SimulatorRuntimeFPCall target = - reinterpret_cast<SimulatorRuntimeFPCall>(external); - double dval0, dval1; - int32_t ival; - switch (redirection->type()) { - case ExternalReference::BUILTIN_FP_FP_CALL: - case ExternalReference::BUILTIN_COMPARE_CALL: - GetFpArgs(&dval0, &dval1); - PrintF("Call to host function at %p with args %f, %f", - FUNCTION_ADDR(target), dval0, dval1); - break; - case ExternalReference::BUILTIN_FP_CALL: - GetFpArgs(&dval0); - PrintF("Call to host function at %p with arg %f", - FUNCTION_ADDR(target), dval0); - break; - case ExternalReference::BUILTIN_FP_INT_CALL: - GetFpArgs(&dval0, &ival); - PrintF("Call to host function at %p with args %f, %d", - FUNCTION_ADDR(target), dval0, ival); - break; - default: - UNREACHABLE(); - break; - } + double x, y; + GetFpArgs(&x, &y); + PrintF("Call to host function at %p with args %f, %f", + FUNCTION_ADDR(target), x, y); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } PrintF("\n"); } CHECK(stack_aligned); - if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) { - SimulatorRuntimeFPCall target = - reinterpret_cast<SimulatorRuntimeFPCall>(external); - double result = target(arg0, arg1, arg2, arg3); - SetFpResult(result); - } else { - SimulatorRuntimeCall target = - reinterpret_cast<SimulatorRuntimeCall>(external); - int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5); - int32_t lo_res = static_cast<int32_t>(result); - int32_t hi_res = static_cast<int32_t>(result >> 32); - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %08x\n", lo_res); - } - set_register(r0, lo_res); - set_register(r1, hi_res); - } + double result = target(arg0, arg1, arg2, arg3); + SetFpResult(result); } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { SimulatorRuntimeDirectApiCall target = reinterpret_cast<SimulatorRuntimeDirectApiCall>(external); @@ -1857,22 +1627,20 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { reinterpret_cast<SimulatorRuntimeCall>(external); if (::v8::internal::FLAG_trace_sim || !stack_aligned) { PrintF( - "Call to host function at %p" - "args %08x, %08x, %08x, %08x, %08x, %08x", + "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc", FUNCTION_ADDR(target), arg0, arg1, arg2, arg3, - arg4, - arg5); + arg4); if (!stack_aligned) { PrintF(" with unaligned stack %08x\n", get_register(sp)); } PrintF("\n"); } CHECK(stack_aligned); - int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5); + int64_t result = target(arg0, arg1, arg2, arg3, arg4); int32_t lo_res = static_cast<int32_t>(result); int32_t hi_res = static_cast<int32_t>(result >> 32); if (::v8::internal::FLAG_trace_sim) { @@ -1886,7 +1654,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { break; } case kBreakpoint: { - ArmDebugger dbg(this); + Debugger dbg(this); dbg.Debug(); break; } @@ -1900,7 +1668,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { // Stop if it is enabled, otherwise go on jumping over the stop // and the message address. 
if (isEnabledStop(code)) { - ArmDebugger dbg(this); + Debugger dbg(this); dbg.Stop(instr); } else { set_pc(get_pc() + 2 * Instruction::kInstrSize); @@ -2208,7 +1976,7 @@ void Simulator::DecodeType01(Instruction* instr) { break; } case BKPT: { - ArmDebugger dbg(this); + Debugger dbg(this); PrintF("Simulator hit BKPT.\n"); dbg.Debug(); break; @@ -2320,15 +2088,8 @@ void Simulator::DecodeType01(Instruction* instr) { } case ADC: { - // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm"); - // Format(instr, "adc'cond's 'rd, 'rn, 'imm"); - alu_out = rn_val + shifter_operand + GetCarry(); - set_register(rd, alu_out); - if (instr->HasS()) { - SetNZFlags(alu_out); - SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry())); - SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true)); - } + Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm"); + Format(instr, "adc'cond's 'rd, 'rn, 'imm"); break; } @@ -2706,8 +2467,6 @@ void Simulator::DecodeType7(Instruction* instr) { // vmov :Rt = Sn // vcvt: Dd = Sm // vcvt: Sd = Dm -// Dd = vabs(Dm) -// Dd = vneg(Dm) // Dd = vadd(Dn, Dm) // Dd = vsub(Dn, Dm) // Dd = vmul(Dn, Dm) @@ -2743,11 +2502,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = fabs(dm_value); set_d_register_from_double(vd, dd_value); - } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) { - // vneg - double dm_value = get_double_from_d_register(vm); - double dd_value = -dm_value; - set_d_register_from_double(vd, dd_value); } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) { @@ -3141,17 +2895,9 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { } break; } - case 0x4: - case 0x5: - case 0x6: - case 0x7: - case 0x9: - case 0xB: - // Load/store multiple single from memory: vldm/vstm. - HandleVList(instr); - break; default: UNIMPLEMENTED(); // Not used by V8. + break; } } else if (instr->CoprocessorValue() == 0xB) { switch (instr->OpcodeValue()) { @@ -3198,14 +2944,9 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { } break; } - case 0x4: - case 0x5: - case 0x9: - // Load/store multiple double from memory: vldm/vstm. - HandleVList(instr); - break; default: UNIMPLEMENTED(); // Not used by V8. + break; } } else { UNIMPLEMENTED(); // Not used by V8. @@ -3216,7 +2957,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { // Executes the current instruction. void Simulator::InstructionDecode(Instruction* instr) { if (v8::internal::FLAG_check_icache) { - CheckICache(isolate_->simulator_i_cache(), instr); + CheckICache(instr); } pc_modified_ = false; if (::v8::internal::FLAG_trace_sim) { @@ -3299,7 +3040,7 @@ void Simulator::Execute() { Instruction* instr = reinterpret_cast<Instruction*>(program_counter); icount_++; if (icount_ == ::v8::internal::FLAG_stop_sim_at) { - ArmDebugger dbg(this); + Debugger dbg(this); dbg.Debug(); } else { InstructionDecode(instr); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 391ef69f5e..bdf1f8a106 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2009 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -49,28 +49,25 @@ namespace internal { (entry(p0, p1, p2, p3, p4)) typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, - void*, int*, Address, int, Isolate*); + void*, int*, Address, int); // Call the generated regexp code directly. The code at the entry address // should act as a function matching the type arm_regexp_matcher. // The fifth argument is a dummy that reserves the space used for // the return address added by the ExitFrame in native calls. -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ - (FUNCTION_CAST<arm_regexp_matcher>(entry)( \ - p0, p1, p2, p3, NULL, p4, p5, p6, p7)) +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ + (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - reinterpret_cast<TryCatch*>(try_catch_address) + (reinterpret_cast<TryCatch*>(try_catch_address)) // The stack limit beyond which we will throw stack overflow errors in // generated code. Because generated code on arm uses the C stack, we // just use the C stack limit. class SimulatorStack : public v8::internal::AllStatic { public: - static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate, - uintptr_t c_limit) { - USE(isolate); + static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return c_limit; } @@ -126,7 +123,7 @@ class CachePage { class Simulator { public: - friend class ArmDebugger; + friend class Debugger; enum Register { no_reg = -1, r0 = 0, r1, r2, r3, r4, r5, r6, r7, @@ -145,19 +142,18 @@ class Simulator { num_d_registers = 16 }; - explicit Simulator(Isolate* isolate); + Simulator(); ~Simulator(); // The currently executing Simulator instance. Potentially there can be one // for each native thread. - static Simulator* current(v8::internal::Isolate* isolate); + static Simulator* current(); // Accessors for register state. Reading the pc value adheres to the ARM // architecture specification and is off by a 8 from the currently executing // instruction. void set_register(int reg, int32_t value); int32_t get_register(int reg) const; - double get_double_from_register_pair(int reg); void set_dw_register(int dreg, const int* dbl); // Support for VFP. @@ -181,7 +177,7 @@ class Simulator { void Execute(); // Call on program start. - static void Initialize(Isolate* isolate); + static void Initialize(); // V8 generally calls into generated JS code with 5 parameters and into // generated RegExp code with 7 parameters. This is a convenience function, @@ -195,22 +191,12 @@ class Simulator { uintptr_t PopAddress(); // ICache checking. - static void FlushICache(v8::internal::HashMap* i_cache, void* start, - size_t size); + static void FlushICache(void* start, size_t size); // Returns true if pc register contains one of the 'special_values' defined // below (bad_lr, end_sim_pc). bool has_bad_pc() const; - // EABI variant for double arguments in use. 
- bool use_eabi_hardfloat() { -#if USE_EABI_HARDFLOAT - return true; -#else - return false; -#endif - } - private: enum special_values { // Known bad pc value to ensure that the simulator does not execute @@ -234,17 +220,13 @@ class Simulator { void SetNZFlags(int32_t val); void SetCFlag(bool val); void SetVFlag(bool val); - bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0); + bool CarryFrom(int32_t left, int32_t right); bool BorrowFrom(int32_t left, int32_t right); bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addition); - inline int GetCarry() { - return c_flag_ ? 1 : 0; - }; - // Support for VFP. void Compute_FPSCR_Flags(double val1, double val2); void Copy_FPSCR_to_APSR(); @@ -252,13 +234,7 @@ class Simulator { // Helper functions to decode common "addressing" modes int32_t GetShiftRm(Instruction* instr, bool* carry_out); int32_t GetImm(Instruction* instr, bool* carry_out); - void ProcessPUW(Instruction* instr, - int num_regs, - int operand_size, - intptr_t* start_address, - intptr_t* end_address); void HandleRList(Instruction* instr, bool load); - void HandleVList(Instruction* inst); void SoftwareInterrupt(Instruction* instr); // Stop helper functions. @@ -311,20 +287,18 @@ class Simulator { void InstructionDecode(Instruction* instr); // ICache. - static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr); - static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start, - int size); - static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page); + static void CheckICache(Instruction* instr); + static void FlushOnePage(intptr_t start, int size); + static CachePage* GetCachePage(void* page); // Runtime call support. static void* RedirectExternalReference( void* external_function, v8::internal::ExternalReference::Type type); - // For use in calls that take double value arguments. + // For use in calls that take two double values, constructed from r0, r1, r2 + // and r3. void GetFpArgs(double* x, double* y); - void GetFpArgs(double* x); - void GetFpArgs(double* x, int32_t* y); void SetFpResult(const double& result); void TrashCallerSaveRegisters(); @@ -359,16 +333,15 @@ class Simulator { char* stack_; bool pc_modified_; int icount_; + static bool initialized_; // Icache simulation - v8::internal::HashMap* i_cache_; + static v8::internal::HashMap* i_cache_; // Registered breakpoints. Instruction* break_pc_; Instr break_instr_; - v8::internal::Isolate* isolate_; - // A stop is watched if its code is less than kNumOfWatchedStops. // Only watched stops support enabling/disabling and the counter feature. static const uint32_t kNumOfWatchedStops = 256; @@ -391,16 +364,15 @@ class Simulator { // When running with the simulator transition into simulated execution at this // point. #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ - reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \ + reinterpret_cast<Object*>(Simulator::current()->Call( \ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ - Simulator::current(Isolate::Current())->Call( \ - entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7) +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ + Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6) -#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ - try_catch_address == NULL ? 
\ - NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)) +#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ + try_catch_address == \ + NULL ? NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)) // The simulator has its own stack. Thus it has a different stack limit from @@ -410,18 +382,17 @@ class Simulator { // trouble down the line. class SimulatorStack : public v8::internal::AllStatic { public: - static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate, - uintptr_t c_limit) { - return Simulator::current(isolate)->StackLimit(); + static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { + return Simulator::current()->StackLimit(); } static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { - Simulator* sim = Simulator::current(Isolate::Current()); + Simulator* sim = Simulator::current(); return sim->PushAddress(try_catch_address); } static inline void UnregisterCTryCatch() { - Simulator::current(Isolate::Current())->PopAddress(); + Simulator::current()->PopAddress(); } }; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 7ea000edbd..60a11f3ced 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -30,7 +30,7 @@ #if defined(V8_TARGET_ARCH_ARM) #include "ic-inl.h" -#include "codegen.h" +#include "codegen-inl.h" #include "stub-cache.h" namespace v8 { @@ -39,16 +39,15 @@ namespace internal { #define __ ACCESS_MASM(masm) -static void ProbeTable(Isolate* isolate, - MacroAssembler* masm, +static void ProbeTable(MacroAssembler* masm, Code::Flags flags, StubCache::Table table, Register name, Register offset, Register scratch, Register scratch2) { - ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); - ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); + ExternalReference key_offset(SCTableReference::keyReference(table)); + ExternalReference value_offset(SCTableReference::valueReference(table)); uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); @@ -95,17 +94,15 @@ static void ProbeTable(Isolate* isolate, // must always call a backup property check that is complete. // This function is safe to call if the receiver has fast properties. // Name must be a symbol and receiver must be a heap object. 
-MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup( - MacroAssembler* masm, - Label* miss_label, - Register receiver, - String* name, - Register scratch0, - Register scratch1) { +static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, + Label* miss_label, + Register receiver, + String* name, + Register scratch0, + Register scratch1) { ASSERT(name->IsSymbol()); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); - __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); + __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1); + __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1); Label done; @@ -121,7 +118,7 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup( // Check that receiver is a JSObject. __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); - __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, miss_label); // Load properties array. @@ -137,21 +134,71 @@ MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup( // Restore the temporarily used register. __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - - MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup( - masm, - miss_label, - &done, - receiver, - properties, - name, - scratch1); - if (result->IsFailure()) return result; - + // Compute the capacity mask. + const int kCapacityOffset = + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + + // Generate an unrolled loop that performs a few probes before + // giving up. + static const int kProbes = 4; + const int kElementsStartOffset = + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the null value). + for (int i = 0; i < kProbes; i++) { + // scratch0 points to properties hash. + // Compute the masked index: (hash + i + i * i) & mask. + Register index = scratch1; + // Capacity is smi 2^n. + __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); + __ sub(index, index, Operand(1)); + __ and_(index, index, Operand( + Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ add(index, index, Operand(index, LSL, 1)); // index *= 3. + + Register entity_name = scratch1; + // Having undefined at this place means the name is not contained. + ASSERT_EQ(kSmiTagSize, 1); + Register tmp = properties; + __ add(tmp, properties, Operand(index, LSL, 1)); + __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); + + ASSERT(!tmp.is(entity_name)); + __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); + __ cmp(entity_name, tmp); + if (i != kProbes - 1) { + __ b(eq, &done); + + // Stop if found the property. + __ cmp(entity_name, Operand(Handle<String>(name))); + __ b(eq, miss_label); + + // Check if the entry name is not a symbol. 
+ __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); + __ ldrb(entity_name, + FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); + __ tst(entity_name, Operand(kIsSymbolMask)); + __ b(eq, miss_label); + + // Restore the properties. + __ ldr(properties, + FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + } else { + // Give up probing if still not found the undefined value. + __ b(ne, miss_label); + } + } __ bind(&done); - __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); - - return result; + __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1); } @@ -162,7 +209,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Register scratch, Register extra, Register extra2) { - Isolate* isolate = masm->isolate(); Label miss; // Make sure that code is valid. The shifting code relies on the @@ -189,7 +235,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, ASSERT(!extra2.is(no_reg)); // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, &miss); // Get the map of the receiver and compute the hash. __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); @@ -201,7 +248,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize)); // Probe the primary table. - ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2); + ProbeTable(masm, flags, kPrimary, name, scratch, extra, extra2); // Primary miss: Compute hash for secondary probe. __ sub(scratch, scratch, Operand(name)); @@ -211,7 +258,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize)); // Probe the secondary table. - ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2); + ProbeTable(masm, flags, kSecondary, name, scratch, extra, extra2); // Cache miss: Fall-through and let caller handle the miss by // entering the runtime system. @@ -239,15 +286,13 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register prototype, Label* miss) { - Isolate* isolate = masm->isolate(); // Check we're still in the same context. __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ Move(ip, isolate->global()); + __ Move(ip, Top::global()); __ cmp(prototype, ip); __ b(ne, miss); // Get the global function with the given index. - JSFunction* function = - JSFunction::cast(isolate->global_context()->get(index)); + JSFunction* function = JSFunction::cast(Top::global_context()->get(index)); // Load its initial map. The global functions all have initial maps. __ Move(prototype, Handle<Map>(function->initial_map())); // Load the prototype from the initial map. @@ -281,7 +326,8 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, Register scratch, Label* miss_label) { // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss_label); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss_label); // Check that the object is a JS array. __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); @@ -303,7 +349,8 @@ static void GenerateStringCheck(MacroAssembler* masm, Label* smi, Label* non_string_object) { // Check that the receiver isn't a smi. 
- __ JumpIfSmi(receiver, smi); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, smi); // Check that the object is a string. __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); @@ -378,7 +425,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Label exit; // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver_reg, miss_label); + __ tst(receiver_reg, Operand(kSmiTagMask)); + __ b(eq, miss_label); // Check that the map of the receiver hasn't changed. __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); @@ -402,10 +450,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ mov(r2, Operand(Handle<Map>(transition))); __ Push(r2, r0); __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), - masm->isolate()), - 3, - 1); + ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), + 3, 1); return; } @@ -427,7 +473,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ str(r0, FieldMemOperand(receiver_reg, offset)); // Skip updating write barrier if storing a smi. - __ JumpIfSmi(r0, &exit); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &exit); // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. @@ -440,7 +487,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ str(r0, FieldMemOperand(scratch, offset)); // Skip updating write barrier if storing a smi. - __ JumpIfSmi(r0, &exit); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &exit); // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. @@ -457,9 +505,9 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); Code* code = NULL; if (kind == Code::LOAD_IC) { - code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss); + code = Builtins::builtin(Builtins::LoadIC_Miss); } else { - code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss); + code = Builtins::builtin(Builtins::KeyedLoadIC_Miss); } Handle<Code> ic(code); @@ -470,8 +518,7 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { static void GenerateCallFunction(MacroAssembler* masm, Object* object, const ParameterCount& arguments, - Label* miss, - Code::ExtraICState extra_ic_state) { + Label* miss) { // ----------- S t a t e ------------- // -- r0: receiver // -- r1: function to call @@ -490,10 +537,7 @@ static void GenerateCallFunction(MacroAssembler* masm, } // Invoke the function. - CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state) - ? 
CALL_AS_FUNCTION - : CALL_AS_METHOD; - __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind); + __ InvokeFunction(r1, arguments, JUMP_FUNCTION); } @@ -504,7 +548,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, JSObject* holder_obj) { __ push(name); InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor(); - ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor)); + ASSERT(!Heap::InNewSpace(interceptor)); Register scratch = name; __ mov(scratch, Operand(Handle<Object>(interceptor))); __ push(scratch); @@ -523,8 +567,7 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm, PushInterceptorArguments(masm, receiver, holder, name, holder_obj); ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), - masm->isolate()); + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly)); __ mov(r0, Operand(5)); __ mov(r1, Operand(ref)); @@ -573,7 +616,7 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, // Pass the additional arguments FastHandleApiCall expects. Object* call_data = optimization.api_call_info()->data(); Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info()); - if (masm->isolate()->heap()->InNewSpace(call_data)) { + if (Heap::InNewSpace(call_data)) { __ Move(r0, api_call_info_handle); __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); } else { @@ -613,9 +656,8 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, // garbage collection but instead return the allocation failure // object. const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; - ExternalReference ref = ExternalReference(&fun, - ExternalReference::DIRECT_API_CALL, - masm->isolate()); + ExternalReference ref = + ExternalReference(&fun, ExternalReference::DIRECT_API_CALL); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -623,12 +665,10 @@ class CallInterceptorCompiler BASE_EMBEDDED { public: CallInterceptorCompiler(StubCompiler* stub_compiler, const ParameterCount& arguments, - Register name, - Code::ExtraICState extra_ic_state) + Register name) : stub_compiler_(stub_compiler), arguments_(arguments), - name_(name), - extra_ic_state_(extra_ic_state) {} + name_(name) {} MaybeObject* Compile(MacroAssembler* masm, JSObject* object, @@ -670,7 +710,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { name, holder, miss); - return masm->isolate()->heap()->undefined_value(); + return Heap::undefined_value(); } } @@ -689,8 +729,6 @@ class CallInterceptorCompiler BASE_EMBEDDED { ASSERT(optimization.is_constant_call()); ASSERT(!lookup->holder()->IsGlobalObject()); - Counters* counters = masm->isolate()->counters(); - int depth1 = kInvalidProtoDepth; int depth2 = kInvalidProtoDepth; bool can_do_fast_api_call = false; @@ -708,11 +746,11 @@ class CallInterceptorCompiler BASE_EMBEDDED { (depth2 != kInvalidProtoDepth); } - __ IncrementCounter(counters->call_const_interceptor(), 1, + __ IncrementCounter(&Counters::call_const_interceptor, 1, scratch1, scratch2); if (can_do_fast_api_call) { - __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1, + __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1, scratch1, scratch2); ReserveSpaceForFastApiCall(masm, scratch1); } @@ -756,11 +794,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { arguments_.immediate()); if (result->IsFailure()) return result; } else { - CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) - ? 
CALL_AS_FUNCTION - : CALL_AS_METHOD; __ InvokeFunction(optimization.constant_function(), arguments_, - JUMP_FUNCTION, call_kind); + JUMP_FUNCTION); } // Deferred code for fast API call case---clean preallocated space. @@ -776,7 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { FreeSpaceForFastApiCall(masm); } - return masm->isolate()->heap()->undefined_value(); + return Heap::undefined_value(); } void CompileRegular(MacroAssembler* masm, @@ -805,9 +840,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { interceptor_holder); __ CallExternalReference( - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), - masm->isolate()), - 5); + ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptorForCall)), + 5); // Restore the name_ register. __ pop(name_); @@ -842,7 +877,6 @@ class CallInterceptorCompiler BASE_EMBEDDED { StubCompiler* stub_compiler_; const ParameterCount& arguments_; Register name_; - Code::ExtraICState extra_ic_state_; }; @@ -1046,7 +1080,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, !current->IsJSGlobalObject() && !current->IsJSGlobalProxy()) { if (!name->IsSymbol()) { - MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name); + MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name); Object* lookup_result = NULL; // Initialization to please compiler. if (!maybe_lookup_result->ToObject(&lookup_result)) { set_failure(Failure::cast(maybe_lookup_result)); @@ -1057,21 +1091,16 @@ Register StubCompiler::CheckPrototypes(JSObject* object, ASSERT(current->property_dictionary()->FindEntry(name) == StringDictionary::kNotFound); - MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(), - miss, - reg, - name, - scratch1, - scratch2); - if (negative_lookup->IsFailure()) { - set_failure(Failure::cast(negative_lookup)); - return reg; - } - + GenerateDictionaryNegativeLookup(masm(), + miss, + reg, + name, + scratch1, + scratch2); __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); reg = holder_reg; // from now the object is in holder_reg __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); - } else if (heap()->InNewSpace(prototype)) { + } else if (Heap::InNewSpace(prototype)) { // Get the map of the current object. __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); __ cmp(scratch1, Operand(Handle<Map>(current->map()))); @@ -1125,7 +1154,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, __ b(ne, miss); // Log the check depth. - LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); + LOG(IntEvent("check-maps-depth", depth + 1)); // Perform security check for access to the global object. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); @@ -1159,7 +1188,8 @@ void StubCompiler::GenerateLoadField(JSObject* object, String* name, Label* miss) { // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); // Check that the maps haven't changed. Register reg = @@ -1180,7 +1210,8 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, String* name, Label* miss) { // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); // Check that the maps haven't changed. Register reg = @@ -1204,7 +1235,8 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, String* name, Label* miss) { // Check that the receiver isn't a smi. 
- __ JumpIfSmi(receiver, miss); + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); // Check that the maps haven't changed. Register reg = @@ -1216,7 +1248,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ push(receiver); __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_ Handle<AccessorInfo> callback_handle(callback); - if (heap()->InNewSpace(callback_handle->data())) { + if (Heap::InNewSpace(callback_handle->data())) { __ Move(scratch3, callback_handle); __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); } else { @@ -1241,9 +1273,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, // object. const int kStackUnwindSpace = 4; ExternalReference ref = - ExternalReference(&fun, - ExternalReference::DIRECT_GETTER_CALL, - masm()->isolate()); + ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL); return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -1372,8 +1402,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, } ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); __ TailCallExternalReference(ref, 5, 1); } } else { // !compile_followup_inline @@ -1385,9 +1414,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, interceptor_holder); - ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), - masm()->isolate()); + ExternalReference ref = ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); __ TailCallExternalReference(ref, 5, 1); } } @@ -1417,7 +1445,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object, // object which can only happen for contextual calls. In this case, // the receiver cannot be a smi. if (object != holder) { - __ JumpIfSmi(r0, miss); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, miss); } // Check that the maps haven't changed. @@ -1433,13 +1462,14 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); // Check that the cell contains the same function. - if (heap()->InNewSpace(function)) { + if (Heap::InNewSpace(function)) { // We can't embed a pointer to a function in new space so we have // to verify that the shared function info is unchanged. This has // the nice side effect that multiple closures based on the same // function can all use this call IC. Before we load through the // function, we have to verify that it still is a function. - __ JumpIfSmi(r1, miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, miss); __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); __ b(ne, miss); @@ -1456,10 +1486,8 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, MaybeObject* CallStubCompiler::GenerateMissBranch() { - MaybeObject* maybe_obj = - isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(), - kind_, - extra_ic_state_); + MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(), + kind_); Object* obj; if (!maybe_obj->ToObject(&obj)) return maybe_obj; __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET); @@ -1484,18 +1512,21 @@ MaybeObject* CallStubCompiler::CompileCallField(JSObject* object, // Get the receiver of the function from the stack into r0. 
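
A pattern worth noting, since it repeats in almost every hunk of this file: the newer V8 threads an explicit Isolate through heap, counter and external-reference lookups (masm->isolate()->heap()->InNewSpace(...), ExternalReference(..., masm->isolate())), while the 3.1.8 code being restored reaches the same singletons through process-wide statics (Heap::InNewSpace(...), ExternalReference(...)). The stand-alone sketch below is not V8 source; Heap and Isolate here are hypothetical stand-ins with placeholder bodies, meant only to show the difference in API shape between the two styles.

#include <cstdio>

namespace v8_3_1 {            // 3.1.x style: process-global singletons.
struct Heap {
  static bool InNewSpace(const void*) { return false; }   // placeholder body
};
}  // namespace v8_3_1

namespace v8_3_4 {            // newer style: state hangs off an Isolate.
struct Heap {
  bool InNewSpace(const void*) const { return false; }    // placeholder body
};
struct Isolate {
  Heap* heap() { return &heap_; }
 private:
  Heap heap_;
};
}  // namespace v8_3_4

int main() {
  int dummy = 0;
  // Old: any code anywhere can ask the single global heap directly.
  bool a = v8_3_1::Heap::InNewSpace(&dummy);
  // New: the caller must have an Isolate* in hand (masm->isolate() in these stubs).
  v8_3_4::Isolate isolate;
  bool b = isolate.heap()->InNewSpace(&dummy);
  std::printf("%d %d\n", a, b);
  return 0;
}
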
__ ldr(r0, MemOperand(sp, argc * kPointerSize)); // Check that the receiver isn't a smi. - __ JumpIfSmi(r0, &miss); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &miss); // Do the right check and compute the holder register. Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss); GenerateFastPropertyLoad(masm(), r1, reg, holder, index); - GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_); + GenerateCallFunction(masm(), object, arguments(), &miss); // Handle call cache miss. __ bind(&miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(FIELD, name); @@ -1516,7 +1547,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // ----------------------------------- // If object is not an array, bail out to regular call. - if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value(); + if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value(); Label miss; @@ -1550,11 +1581,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); // Check that the elements are in fast mode and writable. - __ CheckMap(elements, - r0, - Heap::kFixedArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); + __ CheckMap(elements, r0, + Heap::kFixedArrayMapRootIndex, &call_builtin, true); if (argc == 1) { // Otherwise fall through to call the builtin. Label exit, with_write_barrier, attempt_to_grow_elements; @@ -1605,11 +1633,10 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(&call_builtin); } - Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); + ExternalReference::new_space_allocation_top_address(); ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); + ExternalReference::new_space_allocation_limit_address(); const int kAllocationDelta = 4; // Load top and check if it is the end of elements. @@ -1649,16 +1676,17 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ Ret(); } __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush, - masm()->isolate()), + __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush), argc + 1, 1); } // Handle call cache miss. __ bind(&miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(function); @@ -1679,7 +1707,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, // ----------------------------------- // If object is not an array, bail out to regular call. - if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value(); + if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value(); Label miss, return_undefined, call_builtin; @@ -1703,11 +1731,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); // Check that the elements are in fast mode and writable. 
- __ CheckMap(elements, - r0, - Heap::kFixedArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); + __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true); // Get the array's length into r4 and calculate new length. __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1739,15 +1763,16 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, __ Ret(); __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop, - masm()->isolate()), + __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop), argc + 1, 1); // Handle call cache miss. __ bind(&miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(function); @@ -1769,7 +1794,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall( // ----------------------------------- // If object is not a string, bail out to regular call. - if (!object->IsString() || cell != NULL) return heap()->undefined_value(); + if (!object->IsString() || cell != NULL) return Heap::undefined_value(); const int argc = arguments().immediate(); @@ -1778,9 +1803,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall( Label index_out_of_range; Label* index_out_of_range_label = &index_out_of_range; - if (kind_ == Code::CALL_IC && - (CallICBase::StringStubState::decode(extra_ic_state_) == - DEFAULT_STRING_STUB)) { + if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) { index_out_of_range_label = &miss; } @@ -1832,8 +1855,10 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall( // Restore function name in r2. __ Move(r2, Handle<String>(name)); __ bind(&name_miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(function); @@ -1855,7 +1880,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall( // ----------------------------------- // If object is not a string, bail out to regular call. - if (!object->IsString() || cell != NULL) return heap()->undefined_value(); + if (!object->IsString() || cell != NULL) return Heap::undefined_value(); const int argc = arguments().immediate(); @@ -1864,9 +1889,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall( Label index_out_of_range; Label* index_out_of_range_label = &index_out_of_range; - if (kind_ == Code::CALL_IC && - (CallICBase::StringStubState::decode(extra_ic_state_) == - DEFAULT_STRING_STUB)) { + if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) { index_out_of_range_label = &miss; } @@ -1920,8 +1943,10 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall( // Restore function name in r2. __ Move(r2, Handle<String>(name)); __ bind(&name_miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(function); @@ -1946,7 +1971,7 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall( // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. 
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value(); + if (!object->IsJSObject() || argc != 1) return Heap::undefined_value(); Label miss; GenerateNameCheck(name, &miss); @@ -1955,7 +1980,8 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall( __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(r1, &miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, &miss); @@ -1972,7 +1998,8 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall( // Check the code is a smi. Label slow; STATIC_ASSERT(kSmiTag == 0); - __ JumpIfNotSmi(code, &slow); + __ tst(code, Operand(kSmiTagMask)); + __ b(ne, &slow); // Convert the smi code to uint16. __ and_(code, code, Operand(Smi::FromInt(0xffff))); @@ -1988,12 +2015,14 @@ MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD); + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); __ bind(&miss); // r2: function name. - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name); @@ -2013,17 +2042,14 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object, // -- sp[argc * 4] : receiver // ----------------------------------- - if (!CpuFeatures::IsSupported(VFP3)) { - return heap()->undefined_value(); - } - + if (!CpuFeatures::IsSupported(VFP3)) return Heap::undefined_value(); CpuFeatures::Scope scope_vfp3(VFP3); const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. - if (!object->IsJSObject() || argc != 1) return heap()->undefined_value(); + if (!object->IsJSObject() || argc != 1) return Heap::undefined_value(); Label miss, slow; GenerateNameCheck(name, &miss); @@ -2051,7 +2077,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object, __ Drop(argc + 1, eq); __ Ret(eq); - __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); + __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true); Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return; @@ -2136,12 +2162,12 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object, __ bind(&slow); // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD); + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); __ bind(&miss); // r2: function name. - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + MaybeObject* obj = GenerateMissBranch(); + if (obj->IsFailure()) return obj; // Return the generated code. return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name); @@ -2165,7 +2191,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. 
- if (!object->IsJSObject() || argc != 1) return heap()->undefined_value(); + if (!object->IsJSObject() || argc != 1) return Heap::undefined_value(); Label miss; GenerateNameCheck(name, &miss); @@ -2174,7 +2200,8 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(r1, &miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, &miss); @@ -2211,7 +2238,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, // Check if the argument is a heap number and load its exponent and // sign. __ bind(¬_smi); - __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); + __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true); __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset)); // Check the sign of the argument. If the argument is positive, @@ -2237,72 +2264,20 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD); + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); __ bind(&miss); // r2: function name. - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name); } -MaybeObject* CallStubCompiler::CompileFastApiCall( - const CallOptimization& optimization, - Object* object, - JSObject* holder, - JSGlobalPropertyCell* cell, - JSFunction* function, - String* name) { - Counters* counters = isolate()->counters(); - - ASSERT(optimization.is_simple_api_call()); - // Bail out if object is a global object as we don't want to - // repatch it to global receiver. - if (object->IsGlobalObject()) return heap()->undefined_value(); - if (cell != NULL) return heap()->undefined_value(); - if (!object->IsJSObject()) return heap()->undefined_value(); - int depth = optimization.GetPrototypeDepthOfExpectedType( - JSObject::cast(object), holder); - if (depth == kInvalidProtoDepth) return heap()->undefined_value(); - - Label miss, miss_before_stack_reserved; - - GenerateNameCheck(name, &miss_before_stack_reserved); - - // Get the receiver from the stack. - const int argc = arguments().immediate(); - __ ldr(r1, MemOperand(sp, argc * kPointerSize)); - - // Check that the receiver isn't a smi. - __ JumpIfSmi(r1, &miss_before_stack_reserved); - - __ IncrementCounter(counters->call_const(), 1, r0, r3); - __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3); - - ReserveSpaceForFastApiCall(masm(), r0); - - // Check that the maps haven't changed and find a Holder as a side effect. - CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, - depth, &miss); - - MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc); - if (result->IsFailure()) return result; - - __ bind(&miss); - FreeSpaceForFastApiCall(masm()); - - __ bind(&miss_before_stack_reserved); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; - - // Return the generated code. 
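
Another recurring shape in these hunks is allocation-failure propagation through MaybeObject*: the newer code keeps the MaybeObject and tests IsFailure(), while the 3.1.8 code unwraps it with ToObject(&obj) and returns the original MaybeObject when unwrapping fails. The two spellings are equivalent; both hand a retry-after-GC failure back to the caller. Below is a minimal stand-alone sketch of the idiom using simplified value-type stand-ins, not V8's real pointer-based MaybeObject/Failure classes.

#include <cstdio>

struct Object {};
struct MaybeObject {            // simplified stand-in
  bool failure;
  Object* value;
  bool IsFailure() const { return failure; }
  bool ToObject(Object** out) {           // false == failure, caller must bail out
    if (failure) return false;
    *out = value;
    return true;
  }
};

static Object the_object;
static MaybeObject Allocate(bool fail) { return MaybeObject{fail, &the_object}; }

// Newer spelling: keep the MaybeObject and test IsFailure().
MaybeObject CompileNew(bool fail) {
  MaybeObject maybe_result = Allocate(fail);
  if (maybe_result.IsFailure()) return maybe_result;   // propagate the failure
  Object* obj = maybe_result.value;                    // safe to use from here on
  return MaybeObject{false, obj};
}

// 3.1.8 spelling: unwrap with ToObject() and return the failure as-is.
MaybeObject CompileOld(bool fail) {
  Object* obj;
  { MaybeObject maybe_obj = Allocate(fail);
    if (!maybe_obj.ToObject(&obj)) return maybe_obj;
  }
  return MaybeObject{false, obj};
}

int main() {
  std::printf("%d %d\n", CompileNew(true).IsFailure(), CompileOld(true).IsFailure());
  return 0;
}
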
- return GetCode(function); -} - - MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, @@ -2312,18 +2287,22 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, // -- r2 : name // -- lr : return address // ----------------------------------- - if (HasCustomCallGenerator(function)) { + SharedFunctionInfo* function_info = function->shared(); + if (function_info->HasBuiltinFunctionId()) { + BuiltinFunctionId id = function_info->builtin_function_id(); MaybeObject* maybe_result = CompileCustomCall( - object, holder, NULL, function, name); + id, object, holder, NULL, function, name); Object* result; if (!maybe_result->ToObject(&result)) return maybe_result; // undefined means bail out to regular compiler. - if (!result->IsUndefined()) return result; + if (!result->IsUndefined()) { + return result; + } } - Label miss; + Label miss_in_smi_check; - GenerateNameCheck(name, &miss); + GenerateNameCheck(name, &miss_in_smi_check); // Get the receiver from the stack const int argc = arguments().immediate(); @@ -2331,26 +2310,40 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, // Check that the receiver isn't a smi. if (check != NUMBER_CHECK) { - __ JumpIfSmi(r1, &miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss_in_smi_check); } // Make sure that it's okay not to patch the on stack receiver // unless we're doing a receiver map check. ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); - SharedFunctionInfo* function_info = function->shared(); + CallOptimization optimization(function); + int depth = kInvalidProtoDepth; + Label miss; + switch (check) { case RECEIVER_MAP_CHECK: - __ IncrementCounter(masm()->isolate()->counters()->call_const(), - 1, r0, r3); + __ IncrementCounter(&Counters::call_const, 1, r0, r3); + + if (optimization.is_simple_api_call() && !object->IsGlobalObject()) { + depth = optimization.GetPrototypeDepthOfExpectedType( + JSObject::cast(object), holder); + } + + if (depth != kInvalidProtoDepth) { + __ IncrementCounter(&Counters::call_const_fast_api, 1, r0, r3); + ReserveSpaceForFastApiCall(masm(), r0); + } // Check that the maps haven't changed. CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, - &miss); + depth, &miss); // Patch the receiver on the stack with the global proxy if // necessary. if (object->IsGlobalObject()) { + ASSERT(depth == kInvalidProtoDepth); __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); __ str(r3, MemOperand(sp, argc * kPointerSize)); } @@ -2364,7 +2357,7 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } else { // Check that the object is a two-byte string or a symbol. __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); - __ b(ge, &miss); + __ b(hs, &miss); // Check that the maps starting from the prototype haven't changed. GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); @@ -2381,7 +2374,8 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } else { Label fast; // Check that the object is a smi or a heap number. - __ JumpIfSmi(r1, &fast); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &fast); __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE); __ b(ne, &miss); __ bind(&fast); @@ -2422,15 +2416,24 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, UNREACHABLE(); } - CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) - ? 
CALL_AS_FUNCTION - : CALL_AS_METHOD; - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind); + if (depth != kInvalidProtoDepth) { + MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc); + if (result->IsFailure()) return result; + } else { + __ InvokeFunction(function, arguments(), JUMP_FUNCTION); + } // Handle call cache miss. __ bind(&miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + if (depth != kInvalidProtoDepth) { + FreeSpaceForFastApiCall(masm()); + } + + __ bind(&miss_in_smi_check); + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(function); @@ -2458,7 +2461,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object, // Get the receiver from the stack. __ ldr(r1, MemOperand(sp, argc * kPointerSize)); - CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_); + CallInterceptorCompiler compiler(this, arguments(), r2); MaybeObject* result = compiler.Compile(masm(), object, holder, @@ -2478,12 +2481,14 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object, // Restore receiver. __ ldr(r0, MemOperand(sp, argc * kPointerSize)); - GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_); + GenerateCallFunction(masm(), object, arguments(), &miss); // Handle call cache miss. __ bind(&miss); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(INTERCEPTOR, name); @@ -2500,9 +2505,11 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object, // -- lr : return address // ----------------------------------- - if (HasCustomCallGenerator(function)) { + SharedFunctionInfo* function_info = function->shared(); + if (function_info->HasBuiltinFunctionId()) { + BuiltinFunctionId id = function_info->builtin_function_id(); MaybeObject* maybe_result = CompileCustomCall( - object, holder, cell, function, name); + id, object, holder, cell, function, name); Object* result; if (!maybe_result->ToObject(&result)) return maybe_result; // undefined means bail out to regular compiler. @@ -2531,31 +2538,28 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object, __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->call_global_inline(), 1, r3, r4); + __ IncrementCounter(&Counters::call_global_inline, 1, r3, r4); ASSERT(function->is_compiled()); Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); - CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) - ? CALL_AS_FUNCTION - : CALL_AS_METHOD; if (V8::UseCrankshaft()) { // TODO(kasperl): For now, we always call indirectly through the // code field in the function to allow recompilation to take effect // without changing any of the call sites. 
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); - __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION, - NullCallWrapper(), call_kind); + __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION); } else { - __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET, - JUMP_FUNCTION, call_kind); + __ InvokeCode(code, expected, arguments(), + RelocInfo::CODE_TARGET, JUMP_FUNCTION); } // Handle call cache miss. __ bind(&miss); - __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3); - MaybeObject* maybe_result = GenerateMissBranch(); - if (maybe_result->IsFailure()) return maybe_result; + __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3); + Object* obj; + { MaybeObject* maybe_obj = GenerateMissBranch(); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } // Return the generated code. return GetCode(NORMAL, name); @@ -2581,7 +2585,7 @@ MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object, r1, r2, r3, &miss); __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. @@ -2601,7 +2605,8 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object, Label miss; // Check that the object isn't a smi. - __ JumpIfSmi(r1, &miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); // Check that the map of the object hasn't changed. __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -2623,13 +2628,12 @@ MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object, // Do tail-call to the runtime system. ExternalReference store_callback_property = - ExternalReference(IC_Utility(IC::kStoreCallbackProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreCallbackProperty)); __ TailCallExternalReference(store_callback_property, 4, 1); // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. @@ -2648,7 +2652,8 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, Label miss; // Check that the object isn't a smi. - __ JumpIfSmi(r1, &miss); + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); // Check that the map of the object hasn't changed. __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -2671,13 +2676,12 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, // Do tail-call to the runtime system. ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. @@ -2714,14 +2718,13 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); + __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3); __ Ret(); // Handle store cache miss. 
__ bind(&miss); - __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); + __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. @@ -2739,7 +2742,8 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name, Label miss; // Check that receiver is not a smi. - __ JumpIfSmi(r0, &miss); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &miss); // Check the maps of the full prototype chain. CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss); @@ -2767,7 +2771,7 @@ MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name, GenerateLoadMiss(masm(), Code::LOAD_IC); // Return the generated code. - return GetCode(NONEXISTENT, heap()->empty_string()); + return GetCode(NONEXISTENT, Heap::empty_string()); } @@ -2883,7 +2887,8 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object, // object which can only happen for contextual calls. In this case, // the receiver cannot be a smi. if (object != holder) { - __ JumpIfSmi(r0, &miss); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &miss); } // Check that the map of the global has not changed. @@ -2901,12 +2906,11 @@ MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object, } __ mov(r0, r4); - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); + __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3); __ Ret(); __ bind(&miss); - __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3); + __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3); GenerateLoadMiss(masm(), Code::LOAD_IC); // Return the generated code. @@ -3051,9 +3055,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { // -- r1 : receiver // ----------------------------------- Label miss; - - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_string_length, 1, r2, r3); // Check the key is the cached one. __ cmp(r0, Operand(Handle<String>(name))); @@ -3061,7 +3063,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true); __ bind(&miss); - __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3); + __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -3077,8 +3079,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { // ----------------------------------- Label miss; - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_function_prototype, 1, r2, r3); // Check the name hasn't changed. 
__ cmp(r0, Operand(Handle<String>(name))); @@ -3086,63 +3087,91 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss); __ bind(&miss); - __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); + __ DecrementCounter(&Counters::keyed_load_function_prototype, 1, r2, r3); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); return GetCode(CALLBACKS, name); } -MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { +MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key // -- r1 : receiver // ----------------------------------- - Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map); - if (!maybe_stub->To(&stub)) return maybe_stub; - __ DispatchMap(r1, - r2, - Handle<Map>(receiver_map), - Handle<Code>(stub), - DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + Label miss; + + // Check that the receiver isn't a smi. + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); + + // Check that the map matches. + __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ cmp(r2, Operand(Handle<Map>(receiver->map()))); + __ b(ne, &miss); + + // Check that the key is a smi. + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &miss); + + // Get the elements array. + __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ AssertFastElements(r2); + + // Check that the key is within bounds. + __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); + __ cmp(r0, Operand(r3)); + __ b(hs, &miss); + + // Load the result and make sure it's not the hole. + __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ ldr(r4, + MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r4, ip); + __ b(eq, &miss); + __ mov(r0, r4); + __ Ret(); + + __ bind(&miss); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); // Return the generated code. return GetCode(NORMAL, NULL); } -MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( - MapList* receiver_maps, - CodeList* handler_ics) { +MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key // -- r1 : receiver // ----------------------------------- Label miss; - __ JumpIfSmi(r1, &miss); - int receiver_count = receiver_maps->length(); - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map(receiver_maps->at(current)); - Handle<Code> code(handler_ics->at(current)); - __ mov(ip, Operand(map)); - __ cmp(r2, ip); - __ Jump(code, RelocInfo::CODE_TARGET, eq); - } + // Check that the map matches. + __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false); + + GenerateFastPixelArrayLoad(masm(), + r1, + r0, + r2, + r3, + r4, + r5, + r0, + &miss, + &miss, + &miss); __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. 
- return GetCode(NORMAL, NULL, MEGAMORPHIC); + return GetCode(NORMAL, NULL); } @@ -3158,8 +3187,7 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, // ----------------------------------- Label miss; - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4); + __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4); // Check that the name has not changed. __ cmp(r1, Operand(Handle<String>(name))); @@ -3175,8 +3203,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, &miss); __ bind(&miss); - __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4); - Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss(); + __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. @@ -3184,24 +3213,69 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, } -MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { +MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized( + JSObject* receiver) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key // -- r2 : receiver // -- lr : return address // -- r3 : scratch + // -- r4 : scratch (elements) // ----------------------------------- - Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map); - if (!maybe_stub->To(&stub)) return maybe_stub; - __ DispatchMap(r2, - r3, - Handle<Map>(receiver_map), - Handle<Code>(stub), - DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); + Label miss; + + Register value_reg = r0; + Register key_reg = r1; + Register receiver_reg = r2; + Register scratch = r3; + Register elements_reg = r4; + + // Check that the receiver isn't a smi. + __ tst(receiver_reg, Operand(kSmiTagMask)); + __ b(eq, &miss); + + // Check that the map matches. + __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + __ cmp(scratch, Operand(Handle<Map>(receiver->map()))); + __ b(ne, &miss); + + // Check that the key is a smi. + __ tst(key_reg, Operand(kSmiTagMask)); + __ b(ne, &miss); + + // Get the elements array and make sure it is a fast element array, not 'cow'. + __ ldr(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset)); + __ cmp(scratch, Operand(Handle<Map>(Factory::fixed_array_map()))); + __ b(ne, &miss); + + // Check that the key is within bounds. + if (receiver->IsJSArray()) { + __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + } else { + __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); + } + // Compare smis. + __ cmp(key_reg, scratch); + __ b(hs, &miss); + + __ add(scratch, + elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ str(value_reg, + MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ RecordWrite(scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), + receiver_reg , elements_reg); + + // value_reg (r0) is preserved. + // Done. + __ Ret(); + + __ bind(&miss); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. 
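
The most common mechanical change in this file is replacing the JumpIfSmi / JumpIfNotSmi macro-assembler helpers with the explicit pair tst reg, #kSmiTagMask; b eq/ne, label. Both spellings test the same thing: under V8's pointer tagging a smi keeps the low bit clear and a heap-object pointer has it set. The snippet below is a host-side C++ model of that tagging (assuming the 32-bit layout with a one-bit tag), not generated ARM code.

#include <cassert>
#include <cstdint>

const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;   // ...0001

static intptr_t TagSmi(int32_t value) {        // value << 1, low bit stays 0
  return static_cast<intptr_t>(value) << kSmiTagSize;
}

static bool IsSmi(intptr_t word) {
  // This is what "tst reg, #kSmiTagMask; b eq, label" checks on ARM:
  // the zero flag is set exactly when the low tag bit is clear.
  return (word & kSmiTagMask) == kSmiTag;
}

int main() {
  intptr_t smi = TagSmi(42);
  int heap_object_storage = 0;
  // Heap objects are word-aligned pointers tagged by setting the low bit.
  intptr_t heap_object = reinterpret_cast<intptr_t>(&heap_object_storage) | 1;

  assert(IsSmi(smi));               // JumpIfSmi would take the branch
  assert(!IsSmi(heap_object));      // JumpIfNotSmi would take the branch
  assert((smi >> kSmiTagSize) == 42);
  return 0;
}
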
@@ -3209,35 +3283,44 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( - MapList* receiver_maps, - CodeList* handler_ics) { +MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray( + JSObject* receiver) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key // -- r2 : receiver - // -- lr : return address // -- r3 : scratch + // -- r4 : scratch + // -- r5 : scratch + // -- r6 : scratch + // -- lr : return address // ----------------------------------- Label miss; - __ JumpIfSmi(r2, &miss); - - int receiver_count = receiver_maps->length(); - __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map(receiver_maps->at(current)); - Handle<Code> code(handler_ics->at(current)); - __ mov(ip, Operand(map)); - __ cmp(r3, ip); - __ Jump(code, RelocInfo::CODE_TARGET, eq); - } + + // Check that the map matches. + __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false); + + GenerateFastPixelArrayStore(masm(), + r2, + r1, + r0, + r3, + r4, + r5, + r6, + true, + true, + &miss, + &miss, + NULL, + &miss); __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. - return GetCode(NORMAL, NULL, MEGAMORPHIC); + return GetCode(NORMAL, NULL); } @@ -3267,7 +3350,8 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // r1: constructor function // r7: undefined __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &generic_stub_call); + __ tst(r2, Operand(kSmiTagMask)); + __ b(eq, &generic_stub_call); __ CompareObjectType(r2, r3, r4, MAP_TYPE); __ b(ne, &generic_stub_call); @@ -3368,80 +3452,85 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // Remove caller arguments and receiver from the stack and return. __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); __ add(sp, sp, Operand(kPointerSize)); - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->constructed_objects(), 1, r1, r2); - __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); + __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2); + __ IncrementCounter(&Counters::constructed_objects_stub, 1, r1, r2); __ Jump(lr); // Jump to the generic stub in case the specialized code cannot handle the // construction. __ bind(&generic_stub_call); - Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(code, RelocInfo::CODE_TARGET); + Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); + Handle<Code> generic_construct_stub(code); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); // Return the generated code. 
return GetCode(); } -#undef __ -#define __ ACCESS_MASM(masm) - - -static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) { - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: - case JSObject::EXTERNAL_SHORT_ELEMENTS: - case JSObject::EXTERNAL_INT_ELEMENTS: +static bool IsElementTypeSigned(ExternalArrayType array_type) { + switch (array_type) { + case kExternalByteArray: + case kExternalShortArray: + case kExternalIntArray: return true; - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - case JSObject::EXTERNAL_PIXEL_ELEMENTS: + case kExternalUnsignedByteArray: + case kExternalUnsignedShortArray: + case kExternalUnsignedIntArray: return false; - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - case JSObject::FAST_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: + default: UNREACHABLE(); return false; } - return false; } -void KeyedLoadStubCompiler::GenerateLoadExternalArray( - MacroAssembler* masm, - JSObject::ElementsKind elements_kind) { +MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub( + ExternalArrayType array_type, Code::Flags flags) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key // -- r1 : receiver // ----------------------------------- - Label miss_force_generic, slow, failed_allocation; + Label slow, failed_allocation; Register key = r0; Register receiver = r1; - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. + // Check that the object isn't a smi + __ JumpIfSmi(receiver, &slow); // Check that the key is a smi. - __ JumpIfNotSmi(key, &miss_force_generic); + __ JumpIfNotSmi(key, &slow); + + // Check that the object is a JS object. Load map into r2. + __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + + // Check that the receiver does not require access checks. We need + // to check this explicitly since this generic stub does not perform + // map checks. + __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); + __ b(ne, &slow); + // Check that the elements array is the appropriate type of + // ExternalArray. __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // r3: elements array + __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); + __ cmp(r2, ip); + __ b(ne, &slow); // Check that the index is in range. __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); __ cmp(ip, Operand(key, ASR, kSmiTagSize)); // Unsigned comparison catches both negative and too-large values. 
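
The "unsigned comparison catches both negative and too-large values" comment above refers to a standard trick: compare the untagged index against the length with an unsigned condition (lo/hs), and a negative index, reinterpreted as an unsigned word, becomes enormous and fails the same test as an index past the end. A small host-side illustration in plain C++, not the emitted ARM:

#include <cassert>
#include <cstdint>

static bool InBounds(int32_t index, uint32_t length) {
  // One unsigned compare replaces the pair (index >= 0 && index < length):
  // a negative index wraps to a value >= 2^31, which is never < length.
  return static_cast<uint32_t>(index) < length;
}

int main() {
  assert(InBounds(0, 10));
  assert(InBounds(9, 10));
  assert(!InBounds(10, 10));   // too large
  assert(!InBounds(-1, 10));   // negative -> 0xFFFFFFFF unsigned -> rejected
  return 0;
}
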
- __ b(lo, &miss_force_generic); + __ b(lo, &slow); + // r3: elements array __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); // r3: base pointer of external storage @@ -3450,25 +3539,24 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); Register value = r2; - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: + switch (array_type) { + case kExternalByteArray: __ ldrsb(value, MemOperand(r3, key, LSR, 1)); break; - case JSObject::EXTERNAL_PIXEL_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case kExternalUnsignedByteArray: __ ldrb(value, MemOperand(r3, key, LSR, 1)); break; - case JSObject::EXTERNAL_SHORT_ELEMENTS: + case kExternalShortArray: __ ldrsh(value, MemOperand(r3, key, LSL, 0)); break; - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case kExternalUnsignedShortArray: __ ldrh(value, MemOperand(r3, key, LSL, 0)); break; - case JSObject::EXTERNAL_INT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: + case kExternalIntArray: + case kExternalUnsignedIntArray: __ ldr(value, MemOperand(r3, key, LSL, 1)); break; - case JSObject::EXTERNAL_FLOAT_ELEMENTS: + case kExternalFloatArray: if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); __ add(r2, r3, Operand(key, LSL, 1)); @@ -3477,36 +3565,18 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ ldr(value, MemOperand(r3, key, LSL, 1)); } break; - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ add(r2, r3, Operand(key, LSL, 2)); - __ vldr(d0, r2, 0); - } else { - __ add(r4, r3, Operand(key, LSL, 2)); - // r4: pointer to the beginning of the double we want to load. - __ ldr(r2, MemOperand(r4, 0)); - __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); - } - break; - case JSObject::FAST_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: + default: UNREACHABLE(); break; } // For integer array types: // r2: value - // For float array type: + // For floating-point array type // s0: value (if VFP3 is supported) // r2: value (if VFP3 is not supported) - // For double array type: - // d0: value (if VFP3 is supported) - // r2/r3: value (if VFP3 is not supported) - if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) { + if (array_type == kExternalIntArray) { // For the Int and UnsignedInt array types, we need to see whether // the value can be represented in a Smi. If not, we need to convert // it to a HeapNumber. @@ -3534,23 +3604,10 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ vstr(d0, r3, HeapNumber::kValueOffset); __ Ret(); } else { - Register dst1 = r1; - Register dst2 = r3; - FloatingPointHelper::Destination dest = - FloatingPointHelper::kCoreRegisters; - FloatingPointHelper::ConvertIntToDouble(masm, - value, - dest, - d0, - dst1, - dst2, - r9, - s0); - __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ Ret(); + WriteInt32ToHeapNumberStub stub(value, r0, r3); + __ TailCallStub(&stub); } - } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) { + } else if (array_type == kExternalUnsignedIntArray) { // The test is different for unsigned int values. Since we need // the value to be in the range of a positive smi, we can't // handle either of the top two bits being set in the value. 
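
The "top two bits" remark just above encodes the smi range: assuming the 32-bit layout, a smi carries a 31-bit signed payload, so the largest non-negative smi is 2^30 - 1 and any unsigned 32-bit value with bit 30 or bit 31 set has to be boxed in a HeapNumber instead. (On 64-bit targets the payload is wider, so the constraint differs.) The check below confirms the two formulations agree; it is plain host-side C++, not part of the diff.

#include <cassert>
#include <cstdint>
#include <initializer_list>

const int kSmiValueSize = 31;                                      // 32-bit V8
const uint32_t kMaxPositiveSmi = (1u << (kSmiValueSize - 1)) - 1;  // 0x3FFFFFFF

static bool FitsInPositiveSmi(uint32_t value) {
  return value <= kMaxPositiveSmi;
}

static bool TopTwoBitsClear(uint32_t value) {
  return (value & 0xC0000000u) == 0;   // the test the stub performs
}

int main() {
  for (uint32_t v : {0u, 1u, 0x3FFFFFFFu, 0x40000000u, 0x80000000u, 0xFFFFFFFFu}) {
    assert(FitsInPositiveSmi(v) == TopTwoBitsClear(v));
  }
  return 0;
}
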
@@ -3593,12 +3650,12 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ bind(&box_int_0); // Integer does not have leading zeros. - GenerateUInt2Double(masm, hiword, loword, r4, 0); + GenerateUInt2Double(masm(), hiword, loword, r4, 0); __ b(&done); __ bind(&box_int_1); // Integer has one leading zero. - GenerateUInt2Double(masm, hiword, loword, r4, 1); + GenerateUInt2Double(masm(), hiword, loword, r4, 1); __ bind(&done); @@ -3615,7 +3672,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ mov(r0, r4); __ Ret(); } - } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { + } else if (array_type == kExternalFloatArray) { // For the floating-point array type, we need to always allocate a // HeapNumber. if (CpuFeatures::IsSupported(VFP3)) { @@ -3685,31 +3742,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ mov(r0, r3); __ Ret(); } - } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); - __ sub(r1, r2, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); - - __ mov(r0, r2); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r6, r7, &slow); - - __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); - __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); - __ mov(r0, r4); - __ Ret(); - } } else { // Tag integer as smi and return it. @@ -3719,9 +3751,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // Slow case, key and receiver still in r0 and r1. __ bind(&slow); - __ IncrementCounter( - masm->isolate()->counters()->keyed_load_external_array_slow(), - 1, r2, r3); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); // ---------- S t a t e -------------- // -- lr : return address @@ -3733,23 +3763,19 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); - __ bind(&miss_force_generic); - Code* stub = masm->isolate()->builtins()->builtin( - Builtins::kKeyedLoadIC_MissForceGeneric); - __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET); + return GetCode(flags); } -void KeyedStoreStubCompiler::GenerateStoreExternalArray( - MacroAssembler* masm, - JSObject::ElementsKind elements_kind) { +MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub( + ExternalArrayType array_type, Code::Flags flags) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, check_heap_number, miss_force_generic; + Label slow, check_heap_number; // Register usage. Register value = r0; @@ -3757,84 +3783,65 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( Register receiver = r2; // r3 mostly holds the elements array or the destination external array. - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. + // Check that the object isn't a smi. 
+ __ JumpIfSmi(receiver, &slow); - __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + // Check that the object is a JS object. Load map into r3. + __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); + __ b(le, &slow); + + // Check that the receiver does not require access checks. We need + // to do this because this generic stub does not perform map checks. + __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); + __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); + __ b(ne, &slow); // Check that the key is a smi. - __ JumpIfNotSmi(key, &miss_force_generic); + __ JumpIfNotSmi(key, &slow); + + // Check that the elements array is the appropriate type of ExternalArray. + __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); + __ cmp(r4, ip); + __ b(ne, &slow); - // Check that the index is in range - __ SmiUntag(r4, key); + // Check that the index is in range. + __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); __ cmp(r4, ip); // Unsigned comparison catches both negative and too-large values. - __ b(hs, &miss_force_generic); + __ b(hs, &slow); // Handle both smis and HeapNumbers in the fast path. Go to the // runtime for all other kinds of values. // r3: external array. // r4: key (integer). - if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) { - // Double to pixel conversion is only implemented in the runtime for now. - __ JumpIfNotSmi(value, &slow); - } else { - __ JumpIfNotSmi(value, &check_heap_number); - } - __ SmiUntag(r5, value); + __ JumpIfNotSmi(value, &check_heap_number); + __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); // r3: base pointer of external storage. // r4: key (integer). // r5: value (integer). - switch (elements_kind) { - case JSObject::EXTERNAL_PIXEL_ELEMENTS: - // Clamp the value to [0..255]. - __ Usat(r5, 8, Operand(r5)); - __ strb(r5, MemOperand(r3, r4, LSL, 0)); - break; - case JSObject::EXTERNAL_BYTE_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: __ strb(r5, MemOperand(r3, r4, LSL, 0)); break; - case JSObject::EXTERNAL_SHORT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case kExternalShortArray: + case kExternalUnsignedShortArray: __ strh(r5, MemOperand(r3, r4, LSL, 1)); break; - case JSObject::EXTERNAL_INT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: + case kExternalIntArray: + case kExternalUnsignedIntArray: __ str(r5, MemOperand(r3, r4, LSL, 2)); break; - case JSObject::EXTERNAL_FLOAT_ELEMENTS: + case kExternalFloatArray: // Perform int-to-float conversion and store to memory. - StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); + StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9); break; - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - __ add(r3, r3, Operand(r4, LSL, 3)); - // r3: effective address of the double element - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP3)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } - FloatingPointHelper::ConvertIntToDouble( - masm, r5, destination, - d0, r6, r7, // These are: double_dst, dst1, dst2. - r4, s2); // These are: scratch2, single_scratch. 
- if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP3); - __ vstr(d0, r3, 0); - } else { - __ str(r6, MemOperand(r3, 0)); - __ str(r7, MemOperand(r3, Register::kSizeInBytes)); - } - break; - case JSObject::FAST_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: + default: UNREACHABLE(); break; } @@ -3842,341 +3849,224 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // Entry registers are intact, r0 holds the value which is the return value. __ Ret(); - if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) { - // r3: external array. - // r4: index (integer). - __ bind(&check_heap_number); - __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); - __ b(ne, &slow); - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + // r3: external array. + // r4: index (integer). + __ bind(&check_heap_number); + __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); + __ b(ne, &slow); - // r3: base pointer of external storage. - // r4: key (integer). + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); - // The WebGL specification leaves the behavior of storing NaN and - // +/-Infinity into integer arrays basically undefined. For more - // reproducible behavior, convert these to zero. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); + // r3: base pointer of external storage. + // r4: key (integer). - if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { - // vldr requires offset to be a multiple of 4 so we can not - // include -kHeapObjectTag into it. - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(r4, LSL, 2)); - __ vcvt_f32_f64(s0, d0); - __ vstr(s0, r5, 0); - } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { - __ sub(r5, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ add(r5, r3, Operand(r4, LSL, 3)); - __ vstr(d0, r5, 0); - } else { - // Hoisted load. vldr requires offset to be a multiple of 4 so we can - // not include -kHeapObjectTag into it. - __ sub(r5, value, Operand(kHeapObjectTag)); - __ vldr(d0, r5, HeapNumber::kValueOffset); - __ EmitECMATruncate(r5, d0, s2, r6, r7, r9); - - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(r5, MemOperand(r3, r4, LSL, 0)); - break; - case JSObject::EXTERNAL_SHORT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(r5, MemOperand(r3, r4, LSL, 1)); - break; - case JSObject::EXTERNAL_INT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(r5, MemOperand(r3, r4, LSL, 2)); - break; - case JSObject::EXTERNAL_PIXEL_ELEMENTS: - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - case JSObject::FAST_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } + // The WebGL specification leaves the behavior of storing NaN and + // +/-Infinity into integer arrays basically undefined. For more + // reproducible behavior, convert these to zero. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); - // Entry registers are intact, r0 holds the value which is the return - // value. 
- __ Ret(); + + if (array_type == kExternalFloatArray) { + // vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. + __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(r4, LSL, 2)); + __ vcvt_f32_f64(s0, d0); + __ vstr(s0, r5, 0); } else { - // VFP3 is not available do manual conversions. - __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); - __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); - - if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) { - Label done, nan_or_infinity_or_zero; - static const int kMantissaInHiWordShift = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaInLoWordShift = - kBitsPerInt - kMantissaInHiWordShift; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ b(eq, &nan_or_infinity_or_zero); - - __ teq(r9, Operand(r7)); - __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); - __ b(eq, &nan_or_infinity_or_zero); - - // Rebias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ add(r9, - r9, - Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); - - __ cmp(r9, Operand(kBinary32MaxExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); - __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); - __ b(gt, &done); - - __ cmp(r9, Operand(kBinary32MinExponent)); - __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); - __ b(lt, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); - __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); - - __ bind(&done); - __ str(r5, MemOperand(r3, r4, LSL, 2)); - // Entry registers are intact, r0 holds the value which is the return - // value. - __ Ret(); - - __ bind(&nan_or_infinity_or_zero); - __ and_(r7, r5, Operand(HeapNumber::kSignMask)); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r9, r9, r7); - __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); - __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); - __ b(&done); - } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) { - __ add(r7, r3, Operand(r4, LSL, 3)); - // r7: effective address of destination element. - __ str(r6, MemOperand(r7, 0)); - __ str(r5, MemOperand(r7, Register::kSizeInBytes)); - __ Ret(); + // Need to perform float-to-int conversion. + // Test for NaN or infinity (both give zero). + __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset)); + + // Hoisted load. vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. + __ sub(r5, value, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); + + __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs and Infinities have all-one exponents so they sign extend to -1. + __ cmp(r6, Operand(-1)); + __ mov(r5, Operand(0), LeaveCC, eq); + + // Not infinity or NaN simply convert to int. + if (IsElementTypeSigned(array_type)) { + __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne); } else { - bool is_signed_type = IsElementTypeSigned(elements_kind); - int meaningfull_bits = is_signed_type ? 
(kBitsPerInt - 1) : kBitsPerInt; - int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; - - Label done, sign; - - // Test for all special exponent values: zeros, subnormal numbers, NaNs - // and infinities. All these should be converted to 0. - __ mov(r7, Operand(HeapNumber::kExponentMask)); - __ and_(r9, r5, Operand(r7), SetCC); - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); - __ b(eq, &done); - - __ teq(r9, Operand(r7)); - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); - __ b(eq, &done); - - // Unbias exponent. - __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); - __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); - // If exponent is negative then result is 0. - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); - __ b(mi, &done); - - // If exponent is too big then result is minimal value. - __ cmp(r9, Operand(meaningfull_bits - 1)); - __ mov(r5, Operand(min_value), LeaveCC, ge); - __ b(ge, &done); - - __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); - __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); - __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); - - __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); - __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); - __ b(pl, &sign); - - __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); - __ mov(r5, Operand(r5, LSL, r9)); - __ rsb(r9, r9, Operand(meaningfull_bits)); - __ orr(r5, r5, Operand(r6, LSR, r9)); - - __ bind(&sign); - __ teq(r7, Operand(0, RelocInfo::NONE)); - __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); - - __ bind(&done); - switch (elements_kind) { - case JSObject::EXTERNAL_BYTE_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ strb(r5, MemOperand(r3, r4, LSL, 0)); - break; - case JSObject::EXTERNAL_SHORT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ strh(r5, MemOperand(r3, r4, LSL, 1)); - break; - case JSObject::EXTERNAL_INT_ELEMENTS: - case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ str(r5, MemOperand(r3, r4, LSL, 2)); - break; - case JSObject::EXTERNAL_PIXEL_ELEMENTS: - case JSObject::EXTERNAL_FLOAT_ELEMENTS: - case JSObject::EXTERNAL_DOUBLE_ELEMENTS: - case JSObject::FAST_ELEMENTS: - case JSObject::FAST_DOUBLE_ELEMENTS: - case JSObject::DICTIONARY_ELEMENTS: - case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } + __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne); + } + __ vmov(r5, s0, ne); + + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r5, MemOperand(r3, r4, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r5, MemOperand(r3, r4, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r5, MemOperand(r3, r4, LSL, 2)); + break; + default: + UNREACHABLE(); + break; } } - } - - // Slow case, key and receiver still in r0 and r1. - __ bind(&slow); - __ IncrementCounter( - masm->isolate()->counters()->keyed_load_external_array_slow(), - 1, r2, r3); - - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); - // Miss case, call the runtime. - __ bind(&miss_force_generic); + // Entry registers are intact, r0 holds the value which is the return value. + __ Ret(); + } else { + // VFP3 is not available do manual conversions. 
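A small sketch of the NaN/Infinity test used in the VFP3 path above: Sbfx extracts the 11-bit exponent field of the heap number's high word as a signed value, and NaN and +/-Infinity are exactly the doubles whose exponent field is all ones, so the extracted field compares equal to -1. The bit positions (bit 20, 11 bits wide) follow the IEEE-754 double layout and, like the helper name, are assumptions of the sketch.

#include <cstdint>
#include <cstring>

// True for NaN and +/-Infinity: the only doubles whose exponent field is all ones.
bool HasAllOnesExponent(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));                 // view the raw IEEE-754 bits
  uint32_t high = static_cast<uint32_t>(bits >> 32);        // word holding sign and exponent
  const uint32_t kExponentShift = 20;                       // exponent starts at bit 20...
  const uint32_t kExponentBits = 11;                        // ...and is 11 bits wide
  uint32_t exponent = (high >> kExponentShift) & ((1u << kExponentBits) - 1);
  // Sbfx sign-extends this field, so in the generated code an all-ones
  // exponent shows up as -1; here the all-ones pattern is compared directly.
  return exponent == (1u << kExponentBits) - 1;
}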
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); + __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- + if (array_type == kExternalFloatArray) { + Label done, nan_or_infinity_or_zero; + static const int kMantissaInHiWordShift = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); -} + static const int kMantissaInLoWordShift = + kBitsPerInt - kMantissaInHiWordShift; + // Test for all special exponent values: zeros, subnormal numbers, NaNs + // and infinities. All these should be converted to 0. + __ mov(r7, Operand(HeapNumber::kExponentMask)); + __ and_(r9, r5, Operand(r7), SetCC); + __ b(eq, &nan_or_infinity_or_zero); -void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. + __ teq(r9, Operand(r7)); + __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); + __ b(eq, &nan_or_infinity_or_zero); - // Check that the key is a smi. - __ JumpIfNotSmi(r0, &miss_force_generic); - - // Get the elements array. - __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ AssertFastElements(r2); + // Rebias exponent. + __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); + __ add(r9, + r9, + Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); + + __ cmp(r9, Operand(kBinary32MaxExponent)); + __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); + __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); + __ b(gt, &done); + + __ cmp(r9, Operand(kBinary32MinExponent)); + __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); + __ b(lt, &done); + + __ and_(r7, r5, Operand(HeapNumber::kSignMask)); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); + __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); + __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); - // Check that the key is within bounds. - __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); - __ cmp(r0, Operand(r3)); - __ b(hs, &miss_force_generic); + __ bind(&done); + __ str(r5, MemOperand(r3, r4, LSL, 2)); + // Entry registers are intact, r0 holds the value which is the return + // value. + __ Ret(); - // Load the result and make sure it's not the hole. - __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ ldr(r4, - MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r4, ip); - __ b(eq, &miss_force_generic); - __ mov(r0, r4); - __ Ret(); + __ bind(&nan_or_infinity_or_zero); + __ and_(r7, r5, Operand(HeapNumber::kSignMask)); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r9, r9, r7); + __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); + __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); + __ b(&done); + } else { + bool is_signed_type = IsElementTypeSigned(array_type); + int meaningfull_bits = is_signed_type ? 
(kBitsPerInt - 1) : kBitsPerInt; + int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; + + Label done, sign; + + // Test for all special exponent values: zeros, subnormal numbers, NaNs + // and infinities. All these should be converted to 0. + __ mov(r7, Operand(HeapNumber::kExponentMask)); + __ and_(r9, r5, Operand(r7), SetCC); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ b(eq, &done); + + __ teq(r9, Operand(r7)); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ b(eq, &done); + + // Unbias exponent. + __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); + __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); + // If exponent is negative then result is 0. + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); + __ b(mi, &done); + + // If exponent is too big then result is minimal value. + __ cmp(r9, Operand(meaningfull_bits - 1)); + __ mov(r5, Operand(min_value), LeaveCC, ge); + __ b(ge, &done); + + __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); + + __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); + __ b(pl, &sign); + + __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); + __ mov(r5, Operand(r5, LSL, r9)); + __ rsb(r9, r9, Operand(meaningfull_bits)); + __ orr(r5, r5, Operand(r6, LSR, r9)); + + __ bind(&sign); + __ teq(r7, Operand(0, RelocInfo::NONE)); + __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); - __ bind(&miss_force_generic); - Code* stub = masm->isolate()->builtins()->builtin( - Builtins::kKeyedLoadIC_MissForceGeneric); - __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET); -} + __ bind(&done); + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r5, MemOperand(r3, r4, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r5, MemOperand(r3, r4, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r5, MemOperand(r3, r4, LSL, 2)); + break; + default: + UNREACHABLE(); + break; + } + } + } + // Slow case: call runtime. + __ bind(&slow); -void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : scratch - // -- r4 : scratch (elements) + // Entry registers are intact. + // ---------- S t a t e -------------- + // -- r0 : value + // -- r1 : key + // -- r2 : receiver + // -- lr : return address // ----------------------------------- - Label miss_force_generic; - Register value_reg = r0; - Register key_reg = r1; - Register receiver_reg = r2; - Register scratch = r3; - Register elements_reg = r4; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. + // Push receiver, key and value for runtime call. + __ Push(r2, r1, r0); - // Check that the key is a smi. - __ JumpIfNotSmi(r0, &miss_force_generic); - - // Get the elements array and make sure it is a fast element array, not 'cow'. - __ ldr(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ CheckMap(elements_reg, - scratch, - Heap::kFixedArrayMapRootIndex, - &miss_force_generic, - DONT_DO_SMI_CHECK); - - // Check that the key is within bounds. 
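For orientation, a portable sketch that parallels (but does not transcribe) the manual, non-VFP double-to-int conversion generated above: zeros, subnormals, NaNs and infinities become 0, an exponent that is too large yields the minimal value, and otherwise the significand, with its implicit leading 1 restored, is shifted into place and the sign applied. The IEEE-754 constants (bias 1023, 52 mantissa bits) and the function name are assumptions of the sketch; the stub itself works on the two 32-bit halves held in registers.

#include <cstdint>
#include <cstring>

// Sketch only: parallels, rather than reproduces, the assembler above.
int32_t ManualTruncateToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t sign = static_cast<uint32_t>(bits >> 63);
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF);
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);

  if (exponent == 0 || exponent == 0x7FF) return 0;   // zero, subnormal, NaN, Infinity
  exponent -= 1023;                                    // unbias
  if (exponent < 0) return 0;                          // |value| < 1 truncates to 0
  if (exponent >= 31) return INT32_MIN;                // too big for a signed 32-bit result

  mantissa |= uint64_t{1} << 52;                       // restore the implicit leading 1
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  return sign ? -static_cast<int32_t>(magnitude) : static_cast<int32_t>(magnitude);
}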
- if (is_js_array) { - __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); - } else { - __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); - } - // Compare smis. - __ cmp(key_reg, scratch); - __ b(hs, &miss_force_generic); - - __ add(scratch, - elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ str(value_reg, - MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ RecordWrite(scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), - receiver_reg , elements_reg); + __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes + __ mov(r0, Operand(Smi::FromInt( + Code::ExtractExtraICStateFromFlags(flags) & kStrictMode))); + __ Push(r1, r0); - // value_reg (r0) is preserved. - // Done. - __ Ret(); + __ TailCallRuntime(Runtime::kSetProperty, 5, 1); - __ bind(&miss_force_generic); - Handle<Code> ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(ic, RelocInfo::CODE_TARGET); + return GetCode(flags); } diff --git a/deps/v8/src/arm/virtual-frame-arm-inl.h b/deps/v8/src/arm/virtual-frame-arm-inl.h new file mode 100644 index 0000000000..6a7902afff --- /dev/null +++ b/deps/v8/src/arm/virtual-frame-arm-inl.h @@ -0,0 +1,59 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_ +#define V8_VIRTUAL_FRAME_ARM_INL_H_ + +#include "assembler-arm.h" +#include "virtual-frame-arm.h" + +namespace v8 { +namespace internal { + +// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h +// file if such a thing existed. +MemOperand VirtualFrame::ParameterAt(int index) { + // Index -1 corresponds to the receiver. + ASSERT(-1 <= index); // -1 is the receiver. + ASSERT(index <= parameter_count()); + return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize); +} + + // The receiver frame slot. 
+MemOperand VirtualFrame::Receiver() { + return ParameterAt(-1); +} + + +void VirtualFrame::Forget(int count) { + SpillAll(); + LowerHeight(count); +} + +} } // namespace v8::internal + +#endif // V8_VIRTUAL_FRAME_ARM_INL_H_ diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc new file mode 100644 index 0000000000..544e405dbb --- /dev/null +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -0,0 +1,843 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_ARM) + +#include "codegen-inl.h" +#include "register-allocator-inl.h" +#include "scopes.h" +#include "virtual-frame-inl.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm()) + +void VirtualFrame::PopToR1R0() { + // Shuffle things around so the top of stack is in r0 and r1. + MergeTOSTo(R0_R1_TOS); + // Pop the two registers off the stack so they are detached from the frame. + LowerHeight(2); + top_of_stack_state_ = NO_TOS_REGISTERS; +} + + +void VirtualFrame::PopToR1() { + // Shuffle things around so the top of stack is only in r1. + MergeTOSTo(R1_TOS); + // Pop the register off the stack so it is detached from the frame. + LowerHeight(1); + top_of_stack_state_ = NO_TOS_REGISTERS; +} + + +void VirtualFrame::PopToR0() { + // Shuffle things around so the top of stack only in r0. + MergeTOSTo(R0_TOS); + // Pop the register off the stack so it is detached from the frame. 
+ LowerHeight(1); + top_of_stack_state_ = NO_TOS_REGISTERS; +} + + +void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) { + if (Equals(expected)) return; + ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) == + expected->tos_known_smi_map_); + ASSERT(expected->IsCompatibleWith(this)); + MergeTOSTo(expected->top_of_stack_state_, cond); + ASSERT(register_allocation_map_ == expected->register_allocation_map_); +} + + +void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) { + if (Equals(expected)) return; + tos_known_smi_map_ &= expected->tos_known_smi_map_; + MergeTOSTo(expected->top_of_stack_state_, cond); + ASSERT(register_allocation_map_ == expected->register_allocation_map_); +} + + +void VirtualFrame::MergeTOSTo( + VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) { +#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b)) + switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) { + case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS): + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS): + __ pop(r0, cond); + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS): + __ pop(r1, cond); + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS): + __ pop(r0, cond); + __ pop(r1, cond); + break; + case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS): + __ pop(r1, cond); + __ pop(r0, cond); + break; + case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS): + __ push(r0, cond); + break; + case CASE_NUMBER(R0_TOS, R0_TOS): + break; + case CASE_NUMBER(R0_TOS, R1_TOS): + __ mov(r1, r0, LeaveCC, cond); + break; + case CASE_NUMBER(R0_TOS, R0_R1_TOS): + __ pop(r1, cond); + break; + case CASE_NUMBER(R0_TOS, R1_R0_TOS): + __ mov(r1, r0, LeaveCC, cond); + __ pop(r0, cond); + break; + case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS): + __ push(r1, cond); + break; + case CASE_NUMBER(R1_TOS, R0_TOS): + __ mov(r0, r1, LeaveCC, cond); + break; + case CASE_NUMBER(R1_TOS, R1_TOS): + break; + case CASE_NUMBER(R1_TOS, R0_R1_TOS): + __ mov(r0, r1, LeaveCC, cond); + __ pop(r1, cond); + break; + case CASE_NUMBER(R1_TOS, R1_R0_TOS): + __ pop(r0, cond); + break; + case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS): + __ Push(r1, r0, cond); + break; + case CASE_NUMBER(R0_R1_TOS, R0_TOS): + __ push(r1, cond); + break; + case CASE_NUMBER(R0_R1_TOS, R1_TOS): + __ push(r1, cond); + __ mov(r1, r0, LeaveCC, cond); + break; + case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS): + break; + case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS): + __ Swap(r0, r1, ip, cond); + break; + case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS): + __ Push(r0, r1, cond); + break; + case CASE_NUMBER(R1_R0_TOS, R0_TOS): + __ push(r0, cond); + __ mov(r0, r1, LeaveCC, cond); + break; + case CASE_NUMBER(R1_R0_TOS, R1_TOS): + __ push(r0, cond); + break; + case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS): + __ Swap(r0, r1, ip, cond); + break; + case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS): + break; + default: + UNREACHABLE(); +#undef CASE_NUMBER + } + // A conditional merge will be followed by a conditional branch and the + // fall-through code will have an unchanged virtual frame state. If the + // merge is unconditional ('al'ways) then it might be followed by a fall + // through. We need to update the virtual frame state to match the code we + // are falling into. The final case is an unconditional merge followed by an + // unconditional branch, in which case it doesn't matter what we do to the + // virtual frame state, because the virtual frame will be invalidated. 
+ if (cond == al) { + top_of_stack_state_ = expected_top_of_stack_state; + } +} + + +void VirtualFrame::Enter() { + Comment cmnt(masm(), "[ Enter JS frame"); + +#ifdef DEBUG + // Verify that r1 contains a JS function. The following code relies + // on r2 being available for use. + if (FLAG_debug_code) { + Label map_check, done; + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &map_check); + __ stop("VirtualFrame::Enter - r1 is not a function (smi check)."); + __ bind(&map_check); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(eq, &done); + __ stop("VirtualFrame::Enter - r1 is not a function (map check)."); + __ bind(&done); + } +#endif // DEBUG + + // We are about to push four values to the frame. + Adjust(4); + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + // Adjust FP to point to saved FP. + __ add(fp, sp, Operand(2 * kPointerSize)); +} + + +void VirtualFrame::Exit() { + Comment cmnt(masm(), "[ Exit JS frame"); + // Record the location of the JS exit code for patching when setting + // break point. + __ RecordJSReturn(); + + // Drop the execution stack down to the frame pointer and restore the caller + // frame pointer and return address. + __ mov(sp, fp); + __ ldm(ia_w, sp, fp.bit() | lr.bit()); +} + + +void VirtualFrame::AllocateStackSlots() { + int count = local_count(); + if (count > 0) { + Comment cmnt(masm(), "[ Allocate space for locals"); + Adjust(count); + // Initialize stack slots with 'undefined' value. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r2, Heap::kStackLimitRootIndex); + if (count < kLocalVarBound) { + // For less locals the unrolled loop is more compact. + for (int i = 0; i < count; i++) { + __ push(ip); + } + } else { + // For more locals a loop in generated code is more compact. + Label alloc_locals_loop; + __ mov(r1, Operand(count)); + __ bind(&alloc_locals_loop); + __ push(ip); + __ sub(r1, r1, Operand(1), SetCC); + __ b(ne, &alloc_locals_loop); + } + } else { + __ LoadRoot(r2, Heap::kStackLimitRootIndex); + } + // Check the stack for overflow or a break request. + masm()->cmp(sp, Operand(r2)); + StackCheckStub stub; + // Call the stub if lower. + masm()->mov(ip, + Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()), + RelocInfo::CODE_TARGET), + LeaveCC, + lo); + masm()->Call(ip, lo); +} + + + +void VirtualFrame::PushReceiverSlotAddress() { + UNIMPLEMENTED(); +} + + +void VirtualFrame::PushTryHandler(HandlerType type) { + // Grow the expression stack by handler size less one (the return + // address in lr is already counted by a call instruction). + Adjust(kHandlerSize - 1); + __ PushTryHandler(IN_JAVASCRIPT, type); +} + + +void VirtualFrame::CallJSFunction(int arg_count) { + // InvokeFunction requires function in r1. + PopToR1(); + SpillAll(); + + // +1 for receiver. + Forget(arg_count + 1); + ASSERT(cgen()->HasValidEntryRegisters()); + ParameterCount count(arg_count); + __ InvokeFunction(r1, count, CALL_FUNCTION); + // Restore the context. 
+ __ ldr(cp, Context()); +} + + +void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) { + SpillAll(); + Forget(arg_count); + ASSERT(cgen()->HasValidEntryRegisters()); + __ CallRuntime(f, arg_count); +} + + +void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) { + SpillAll(); + Forget(arg_count); + ASSERT(cgen()->HasValidEntryRegisters()); + __ CallRuntime(id, arg_count); +} + + +#ifdef ENABLE_DEBUGGER_SUPPORT +void VirtualFrame::DebugBreak() { + ASSERT(cgen()->HasValidEntryRegisters()); + __ DebugBreak(); +} +#endif + + +void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id, + InvokeJSFlags flags, + int arg_count) { + Forget(arg_count); + __ InvokeBuiltin(id, flags); +} + + +void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) { + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + PopToR0(); + SpillAll(); + __ mov(r2, Operand(name)); + CallCodeObject(ic, mode, 0); +} + + +void VirtualFrame::CallStoreIC(Handle<String> name, + bool is_contextual, + StrictModeFlag strict_mode) { + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict + : Builtins::StoreIC_Initialize)); + PopToR0(); + RelocInfo::Mode mode; + if (is_contextual) { + SpillAll(); + __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + mode = RelocInfo::CODE_TARGET_CONTEXT; + } else { + EmitPop(r1); + SpillAll(); + mode = RelocInfo::CODE_TARGET; + } + __ mov(r2, Operand(name)); + CallCodeObject(ic, mode, 0); +} + + +void VirtualFrame::CallKeyedLoadIC() { + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + PopToR1R0(); + SpillAll(); + CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); +} + + +void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) { + Handle<Code> ic(Builtins::builtin( + (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict + : Builtins::KeyedStoreIC_Initialize)); + PopToR1R0(); + SpillAll(); + EmitPop(r2); + CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); +} + + +void VirtualFrame::CallCodeObject(Handle<Code> code, + RelocInfo::Mode rmode, + int dropped_args) { + switch (code->kind()) { + case Code::CALL_IC: + case Code::KEYED_CALL_IC: + case Code::FUNCTION: + break; + case Code::KEYED_LOAD_IC: + case Code::LOAD_IC: + case Code::KEYED_STORE_IC: + case Code::STORE_IC: + ASSERT(dropped_args == 0); + break; + case Code::BUILTIN: + ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall)); + break; + default: + UNREACHABLE(); + break; + } + Forget(dropped_args); + ASSERT(cgen()->HasValidEntryRegisters()); + __ Call(code, rmode); +} + + +// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS. +const bool VirtualFrame::kR0InUse[TOS_STATES] = + { false, true, false, true, true }; +const bool VirtualFrame::kR1InUse[TOS_STATES] = + { false, false, true, true, true }; +const int VirtualFrame::kVirtualElements[TOS_STATES] = + { 0, 1, 1, 2, 2 }; +const Register VirtualFrame::kTopRegister[TOS_STATES] = + { r0, r0, r1, r1, r0 }; +const Register VirtualFrame::kBottomRegister[TOS_STATES] = + { r0, r0, r1, r0, r1 }; +const Register VirtualFrame::kAllocatedRegisters[ + VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 }; +// Popping is done by the transition implied by kStateAfterPop. Of course if +// there were no stack slots allocated to registers then the physical SP must +// be adjusted. 
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] = + { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS }; +// Pushing is done by the transition implied by kStateAfterPush. Of course if +// the maximum number of registers was already allocated to the top of stack +// slots then one register must be physically pushed onto the stack. +const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] = + { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS }; + + +bool VirtualFrame::SpilledScope::is_spilled_ = false; + + +void VirtualFrame::Drop(int count) { + ASSERT(count >= 0); + ASSERT(height() >= count); + // Discard elements from the virtual frame and free any registers. + int num_virtual_elements = kVirtualElements[top_of_stack_state_]; + while (num_virtual_elements > 0) { + Pop(); + num_virtual_elements--; + count--; + if (count == 0) return; + } + if (count == 0) return; + __ add(sp, sp, Operand(count * kPointerSize)); + LowerHeight(count); +} + + +void VirtualFrame::Pop() { + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + __ add(sp, sp, Operand(kPointerSize)); + } else { + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + LowerHeight(1); +} + + +void VirtualFrame::EmitPop(Register reg) { + ASSERT(!is_used(RegisterAllocator::ToNumber(reg))); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + __ pop(reg); + } else { + __ mov(reg, kTopRegister[top_of_stack_state_]); + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + LowerHeight(1); +} + + +void VirtualFrame::SpillAllButCopyTOSToR0() { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + break; + case R0_TOS: + __ push(r0); + break; + case R1_TOS: + __ push(r1); + __ mov(r0, r1); + break; + case R0_R1_TOS: + __ Push(r1, r0); + break; + case R1_R0_TOS: + __ Push(r0, r1); + __ mov(r0, r1); + break; + default: + UNREACHABLE(); + } + top_of_stack_state_ = NO_TOS_REGISTERS; +} + + +void VirtualFrame::SpillAllButCopyTOSToR1() { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r1, MemOperand(sp, 0)); + break; + case R0_TOS: + __ push(r0); + __ mov(r1, r0); + break; + case R1_TOS: + __ push(r1); + break; + case R0_R1_TOS: + __ Push(r1, r0); + __ mov(r1, r0); + break; + case R1_R0_TOS: + __ Push(r0, r1); + break; + default: + UNREACHABLE(); + } + top_of_stack_state_ = NO_TOS_REGISTERS; +} + + +void VirtualFrame::SpillAllButCopyTOSToR1R0() { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r1, MemOperand(sp, 0)); + __ ldr(r0, MemOperand(sp, kPointerSize)); + break; + case R0_TOS: + __ push(r0); + __ mov(r1, r0); + __ ldr(r0, MemOperand(sp, kPointerSize)); + break; + case R1_TOS: + __ push(r1); + __ ldr(r0, MemOperand(sp, kPointerSize)); + break; + case R0_R1_TOS: + __ Push(r1, r0); + __ Swap(r0, r1, ip); + break; + case R1_R0_TOS: + __ Push(r0, r1); + break; + default: + UNREACHABLE(); + } + top_of_stack_state_ = NO_TOS_REGISTERS; +} + + +Register VirtualFrame::Peek() { + AssertIsNotSpilled(); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + Register answer = kTopRegister[top_of_stack_state_]; + __ pop(answer); + return answer; + } else { + return kTopRegister[top_of_stack_state_]; + } +} + + +Register VirtualFrame::Peek2() { + AssertIsNotSpilled(); + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + case R0_TOS: + case R0_R1_TOS: + MergeTOSTo(R0_R1_TOS); + return r1; + case R1_TOS: + case R1_R0_TOS: + 
MergeTOSTo(R1_R0_TOS); + return r0; + default: + UNREACHABLE(); + return no_reg; + } +} + + +void VirtualFrame::Dup() { + if (SpilledScope::is_spilled()) { + __ ldr(ip, MemOperand(sp, 0)); + __ push(ip); + } else { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + top_of_stack_state_ = R0_TOS; + break; + case R0_TOS: + __ mov(r1, r0); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_TOS: + __ mov(r0, r1); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + case R0_R1_TOS: + __ push(r1); + __ mov(r1, r0); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_R0_TOS: + __ push(r0); + __ mov(r0, r1); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + default: + UNREACHABLE(); + } + } + RaiseHeight(1, tos_known_smi_map_ & 1); +} + + +void VirtualFrame::Dup2() { + if (SpilledScope::is_spilled()) { + __ ldr(ip, MemOperand(sp, kPointerSize)); + __ push(ip); + __ ldr(ip, MemOperand(sp, kPointerSize)); + __ push(ip); + } else { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + __ ldr(r1, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R0_R1_TOS; + break; + case R0_TOS: + __ push(r0); + __ ldr(r1, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_TOS: + __ push(r1); + __ ldr(r0, MemOperand(sp, kPointerSize)); + top_of_stack_state_ = R1_R0_TOS; + break; + case R0_R1_TOS: + __ Push(r1, r0); + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_R0_TOS: + __ Push(r0, r1); + top_of_stack_state_ = R1_R0_TOS; + break; + default: + UNREACHABLE(); + } + } + RaiseHeight(2, tos_known_smi_map_ & 3); +} + + +Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { + ASSERT(but_not_to_this_one.is(r0) || + but_not_to_this_one.is(r1) || + but_not_to_this_one.is(no_reg)); + LowerHeight(1); + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + if (but_not_to_this_one.is(r0)) { + __ pop(r1); + return r1; + } else { + __ pop(r0); + return r0; + } + } else { + Register answer = kTopRegister[top_of_stack_state_]; + ASSERT(!answer.is(but_not_to_this_one)); + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + return answer; + } +} + + +void VirtualFrame::EnsureOneFreeTOSRegister() { + if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) { + __ push(kBottomRegister[top_of_stack_state_]); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + top_of_stack_state_ = kStateAfterPop[top_of_stack_state_]; + } + ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters); +} + + +void VirtualFrame::EmitPush(Register reg, TypeInfo info) { + RaiseHeight(1, info.IsSmi() ? 1 : 0); + if (reg.is(cp)) { + // If we are pushing cp then we are about to make a call and things have to + // be pushed to the physical stack. There's nothing to be gained my moving + // to a TOS register and then pushing that, we might as well push to the + // physical stack immediately. 
+ MergeTOSTo(NO_TOS_REGISTERS); + __ push(reg); + return; + } + if (SpilledScope::is_spilled()) { + ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); + __ push(reg); + return; + } + if (top_of_stack_state_ == NO_TOS_REGISTERS) { + if (reg.is(r0)) { + top_of_stack_state_ = R0_TOS; + return; + } + if (reg.is(r1)) { + top_of_stack_state_ = R1_TOS; + return; + } + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + Register dest = kTopRegister[top_of_stack_state_]; + __ Move(dest, reg); +} + + +void VirtualFrame::SetElementAt(Register reg, int this_far_down) { + if (this_far_down < kTOSKnownSmiMapSize) { + tos_known_smi_map_ &= ~(1 << this_far_down); + } + if (this_far_down == 0) { + Pop(); + Register dest = GetTOSRegister(); + if (dest.is(reg)) { + // We already popped one item off the top of the stack. If the only + // free register is the one we were asked to push then we have been + // asked to push a register that was already in use, which cannot + // happen. It therefore folows that there are two free TOS registers: + ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); + dest = dest.is(r0) ? r1 : r0; + } + __ mov(dest, reg); + EmitPush(dest); + } else if (this_far_down == 1) { + int virtual_elements = kVirtualElements[top_of_stack_state_]; + if (virtual_elements < 2) { + __ str(reg, ElementAt(this_far_down)); + } else { + ASSERT(virtual_elements == 2); + ASSERT(!reg.is(r0)); + ASSERT(!reg.is(r1)); + Register dest = kBottomRegister[top_of_stack_state_]; + __ mov(dest, reg); + } + } else { + ASSERT(this_far_down >= 2); + ASSERT(kVirtualElements[top_of_stack_state_] <= 2); + __ str(reg, ElementAt(this_far_down)); + } +} + + +Register VirtualFrame::GetTOSRegister() { + if (SpilledScope::is_spilled()) return r0; + + EnsureOneFreeTOSRegister(); + return kTopRegister[kStateAfterPush[top_of_stack_state_]]; +} + + +void VirtualFrame::EmitPush(Operand operand, TypeInfo info) { + RaiseHeight(1, info.IsSmi() ? 1 : 0); + if (SpilledScope::is_spilled()) { + __ mov(r0, operand); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ mov(kTopRegister[top_of_stack_state_], operand); +} + + +void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) { + RaiseHeight(1, info.IsSmi() ? 1 : 0); + if (SpilledScope::is_spilled()) { + __ ldr(r0, operand); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ ldr(kTopRegister[top_of_stack_state_], operand); +} + + +void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) { + RaiseHeight(1, 0); + if (SpilledScope::is_spilled()) { + __ LoadRoot(r0, index); + __ push(r0); + return; + } + EnsureOneFreeTOSRegister(); + top_of_stack_state_ = kStateAfterPush[top_of_stack_state_]; + __ LoadRoot(kTopRegister[top_of_stack_state_], index); +} + + +void VirtualFrame::EmitPushMultiple(int count, int src_regs) { + ASSERT(SpilledScope::is_spilled()); + Adjust(count); + __ stm(db_w, sp, src_regs); +} + + +void VirtualFrame::SpillAll() { + switch (top_of_stack_state_) { + case R1_R0_TOS: + masm()->push(r0); + // Fall through. + case R1_TOS: + masm()->push(r1); + top_of_stack_state_ = NO_TOS_REGISTERS; + break; + case R0_R1_TOS: + masm()->push(r1); + // Fall through. + case R0_TOS: + masm()->push(r0); + top_of_stack_state_ = NO_TOS_REGISTERS; + // Fall through. 
+ case NO_TOS_REGISTERS: + break; + default: + UNREACHABLE(); + break; + } + ASSERT(register_allocation_map_ == 0); // Not yet implemented. +} + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h new file mode 100644 index 0000000000..76470bdc53 --- /dev/null +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -0,0 +1,520 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_ +#define V8_ARM_VIRTUAL_FRAME_ARM_H_ + +#include "register-allocator.h" + +namespace v8 { +namespace internal { + +// This dummy class is only used to create invalid virtual frames. +extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer; + + +// ------------------------------------------------------------------------- +// Virtual frames +// +// The virtual frame is an abstraction of the physical stack frame. It +// encapsulates the parameters, frame-allocated locals, and the expression +// stack. It supports push/pop operations on the expression stack, as well +// as random access to the expression stack elements, locals, and +// parameters. + +class VirtualFrame : public ZoneObject { + public: + class RegisterAllocationScope; + // A utility class to introduce a scope where the virtual frame is + // expected to remain spilled. The constructor spills the code + // generator's current frame, and keeps it spilled. 
+ class SpilledScope BASE_EMBEDDED { + public: + explicit SpilledScope(VirtualFrame* frame) + : old_is_spilled_(is_spilled_) { + if (frame != NULL) { + if (!is_spilled_) { + frame->SpillAll(); + } else { + frame->AssertIsSpilled(); + } + } + is_spilled_ = true; + } + ~SpilledScope() { + is_spilled_ = old_is_spilled_; + } + static bool is_spilled() { return is_spilled_; } + + private: + static bool is_spilled_; + int old_is_spilled_; + + SpilledScope() { } + + friend class RegisterAllocationScope; + }; + + class RegisterAllocationScope BASE_EMBEDDED { + public: + // A utility class to introduce a scope where the virtual frame + // is not spilled, ie. where register allocation occurs. Eventually + // when RegisterAllocationScope is ubiquitous it can be removed + // along with the (by then unused) SpilledScope class. + inline explicit RegisterAllocationScope(CodeGenerator* cgen); + inline ~RegisterAllocationScope(); + + private: + CodeGenerator* cgen_; + bool old_is_spilled_; + + RegisterAllocationScope() { } + }; + + // An illegal index into the virtual frame. + static const int kIllegalIndex = -1; + + // Construct an initial virtual frame on entry to a JS function. + inline VirtualFrame(); + + // Construct an invalid virtual frame, used by JumpTargets. + inline VirtualFrame(InvalidVirtualFrameInitializer* dummy); + + // Construct a virtual frame as a clone of an existing one. + explicit inline VirtualFrame(VirtualFrame* original); + + inline CodeGenerator* cgen() const; + inline MacroAssembler* masm(); + + // The number of elements on the virtual frame. + int element_count() const { return element_count_; } + + // The height of the virtual expression stack. + inline int height() const; + + bool is_used(int num) { + switch (num) { + case 0: { // r0. + return kR0InUse[top_of_stack_state_]; + } + case 1: { // r1. + return kR1InUse[top_of_stack_state_]; + } + case 2: + case 3: + case 4: + case 5: + case 6: { // r2 to r6. + ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters); + ASSERT(num >= kFirstAllocatedRegister); + if ((register_allocation_map_ & + (1 << (num - kFirstAllocatedRegister))) == 0) { + return false; + } else { + return true; + } + } + default: { + ASSERT(num < kFirstAllocatedRegister || + num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters); + return false; + } + } + } + + // Add extra in-memory elements to the top of the frame to match an actual + // frame (eg, the frame after an exception handler is pushed). No code is + // emitted. + void Adjust(int count); + + // Forget elements from the top of the frame to match an actual frame (eg, + // the frame after a runtime call). No code is emitted except to bring the + // frame to a spilled state. + void Forget(int count); + + // Spill all values from the frame to memory. + void SpillAll(); + + void AssertIsSpilled() const { + ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); + ASSERT(register_allocation_map_ == 0); + } + + void AssertIsNotSpilled() { + ASSERT(!SpilledScope::is_spilled()); + } + + // Spill all occurrences of a specific register from the frame. + void Spill(Register reg) { + UNIMPLEMENTED(); + } + + // Spill all occurrences of an arbitrary register if possible. Return the + // register spilled or no_reg if it was not possible to free any register + // (ie, they all have frame-external references). Unimplemented. + Register SpillAnyRegister(); + + // Make this virtual frame have a state identical to an expected virtual + // frame. 
As a side effect, code may be emitted to make this frame match + // the expected one. + void MergeTo(VirtualFrame* expected, Condition cond = al); + void MergeTo(const VirtualFrame* expected, Condition cond = al); + + // Checks whether this frame can be branched to by the other frame. + bool IsCompatibleWith(const VirtualFrame* other) const { + return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0; + } + + inline void ForgetTypeInfo() { + tos_known_smi_map_ = 0; + } + + // Detach a frame from its code generator, perhaps temporarily. This + // tells the register allocator that it is free to use frame-internal + // registers. Used when the code generator's frame is switched from this + // one to NULL by an unconditional jump. + void DetachFromCodeGenerator() { + } + + // (Re)attach a frame to its code generator. This informs the register + // allocator that the frame-internal register references are active again. + // Used when a code generator's frame is switched from NULL to this one by + // binding a label. + void AttachToCodeGenerator() { + } + + // Emit code for the physical JS entry and exit frame sequences. After + // calling Enter, the virtual frame is ready for use; and after calling + // Exit it should not be used. Note that Enter does not allocate space in + // the physical frame for storing frame-allocated locals. + void Enter(); + void Exit(); + + // Prepare for returning from the frame by elements in the virtual frame. This + // avoids generating unnecessary merge code when jumping to the + // shared return site. No spill code emitted. Value to return should be in r0. + inline void PrepareForReturn(); + + // Number of local variables after when we use a loop for allocating. + static const int kLocalVarBound = 5; + + // Allocate and initialize the frame-allocated locals. + void AllocateStackSlots(); + + // The current top of the expression stack as an assembly operand. + MemOperand Top() { + AssertIsSpilled(); + return MemOperand(sp, 0); + } + + // An element of the expression stack as an assembly operand. + MemOperand ElementAt(int index) { + int adjusted_index = index - kVirtualElements[top_of_stack_state_]; + ASSERT(adjusted_index >= 0); + return MemOperand(sp, adjusted_index * kPointerSize); + } + + bool KnownSmiAt(int index) { + if (index >= kTOSKnownSmiMapSize) return false; + return (tos_known_smi_map_ & (1 << index)) != 0; + } + + // A frame-allocated local as an assembly operand. + inline MemOperand LocalAt(int index); + + // Push the address of the receiver slot on the frame. + void PushReceiverSlotAddress(); + + // The function frame slot. + MemOperand Function() { return MemOperand(fp, kFunctionOffset); } + + // The context frame slot. + MemOperand Context() { return MemOperand(fp, kContextOffset); } + + // A parameter as an assembly operand. + inline MemOperand ParameterAt(int index); + + // The receiver frame slot. + inline MemOperand Receiver(); + + // Push a try-catch or try-finally handler on top of the virtual frame. + void PushTryHandler(HandlerType type); + + // Call stub given the number of arguments it expects on (and + // removes from) the stack. + inline void CallStub(CodeStub* stub, int arg_count); + + // Call JS function from top of the stack with arguments + // taken from the stack. + void CallJSFunction(int arg_count); + + // Call runtime given the number of arguments expected on (and + // removed from) the stack. 
+ void CallRuntime(Runtime::Function* f, int arg_count); + void CallRuntime(Runtime::FunctionId id, int arg_count); + +#ifdef ENABLE_DEBUGGER_SUPPORT + void DebugBreak(); +#endif + + // Invoke builtin given the number of arguments it expects on (and + // removes from) the stack. + void InvokeBuiltin(Builtins::JavaScript id, + InvokeJSFlags flag, + int arg_count); + + // Call load IC. Receiver is on the stack and is consumed. Result is returned + // in r0. + void CallLoadIC(Handle<String> name, RelocInfo::Mode mode); + + // Call store IC. If the load is contextual, value is found on top of the + // frame. If not, value and receiver are on the frame. Both are consumed. + // Result is returned in r0. + void CallStoreIC(Handle<String> name, bool is_contextual, + StrictModeFlag strict_mode); + + // Call keyed load IC. Key and receiver are on the stack. Both are consumed. + // Result is returned in r0. + void CallKeyedLoadIC(); + + // Call keyed store IC. Value, key and receiver are on the stack. All three + // are consumed. Result is returned in r0. + void CallKeyedStoreIC(StrictModeFlag strict_mode); + + // Call into an IC stub given the number of arguments it removes + // from the stack. Register arguments to the IC stub are implicit, + // and depend on the type of IC stub. + void CallCodeObject(Handle<Code> ic, + RelocInfo::Mode rmode, + int dropped_args); + + // Drop a number of elements from the top of the expression stack. May + // emit code to affect the physical frame. Does not clobber any registers + // excepting possibly the stack pointer. + void Drop(int count); + + // Drop one element. + void Drop() { Drop(1); } + + // Pop an element from the top of the expression stack. Discards + // the result. + void Pop(); + + // Pop an element from the top of the expression stack. The register + // will be one normally used for the top of stack register allocation + // so you can't hold on to it if you push on the stack. + Register PopToRegister(Register but_not_to_this_one = no_reg); + + // Look at the top of the stack. The register returned is aliased and + // must be copied to a scratch register before modification. + Register Peek(); + + // Look at the value beneath the top of the stack. The register returned is + // aliased and must be copied to a scratch register before modification. + Register Peek2(); + + // Duplicate the top of stack. + void Dup(); + + // Duplicate the two elements on top of stack. + void Dup2(); + + // Flushes all registers, but it puts a copy of the top-of-stack in r0. + void SpillAllButCopyTOSToR0(); + + // Flushes all registers, but it puts a copy of the top-of-stack in r1. + void SpillAllButCopyTOSToR1(); + + // Flushes all registers, but it puts a copy of the top-of-stack in r1 + // and the next value on the stack in r0. + void SpillAllButCopyTOSToR1R0(); + + // Pop and save an element from the top of the expression stack and + // emit a corresponding pop instruction. + void EmitPop(Register reg); + + // Takes the top two elements and puts them in r0 (top element) and r1 + // (second element). + void PopToR1R0(); + + // Takes the top element and puts it in r1. + void PopToR1(); + + // Takes the top element and puts it in r0. + void PopToR0(); + + // Push an element on top of the expression stack and emit a + // corresponding push instruction. 
+ void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown()); + void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown()); + void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown()); + void EmitPushRoot(Heap::RootListIndex index); + + // Overwrite the nth thing on the stack. If the nth position is in a + // register then this turns into a mov, otherwise an str. Afterwards + // you can still use the register even if it is a register that can be + // used for TOS (r0 or r1). + void SetElementAt(Register reg, int this_far_down); + + // Get a register which is free and which must be immediately used to + // push on the top of the stack. + Register GetTOSRegister(); + + // Push multiple registers on the stack and the virtual frame + // Register are selected by setting bit in src_regs and + // are pushed in decreasing order: r15 .. r0. + void EmitPushMultiple(int count, int src_regs); + + static Register scratch0() { return r7; } + static Register scratch1() { return r9; } + + private: + static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; + static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset; + static const int kContextOffset = StandardFrameConstants::kContextOffset; + + static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize; + static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots. + + // 5 states for the top of stack, which can be in memory or in r0 and r1. + enum TopOfStack { + NO_TOS_REGISTERS, + R0_TOS, + R1_TOS, + R1_R0_TOS, + R0_R1_TOS, + TOS_STATES + }; + + static const int kMaxTOSRegisters = 2; + + static const bool kR0InUse[TOS_STATES]; + static const bool kR1InUse[TOS_STATES]; + static const int kVirtualElements[TOS_STATES]; + static const TopOfStack kStateAfterPop[TOS_STATES]; + static const TopOfStack kStateAfterPush[TOS_STATES]; + static const Register kTopRegister[TOS_STATES]; + static const Register kBottomRegister[TOS_STATES]; + + // We allocate up to 5 locals in registers. + static const int kNumberOfAllocatedRegisters = 5; + // r2 to r6 are allocated to locals. + static const int kFirstAllocatedRegister = 2; + + static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters]; + + static Register AllocatedRegister(int r) { + ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters); + return kAllocatedRegisters[r]; + } + + // The number of elements on the stack frame. + int element_count_; + TopOfStack top_of_stack_state_:3; + int register_allocation_map_:kNumberOfAllocatedRegisters; + static const int kTOSKnownSmiMapSize = 4; + unsigned tos_known_smi_map_:kTOSKnownSmiMapSize; + + // The index of the element that is at the processor's stack pointer + // (the sp register). For now since everything is in memory it is given + // by the number of elements on the not-very-virtual stack frame. + int stack_pointer() { return element_count_ - 1; } + + // The number of frame-allocated locals and parameters respectively. + inline int parameter_count() const; + inline int local_count() const; + + // The index of the element that is at the processor's frame pointer + // (the fp register). The parameters, receiver, function, and context + // are below the frame pointer. + inline int frame_pointer() const; + + // The index of the first parameter. The receiver lies below the first + // parameter. + int param0_index() { return 1; } + + // The index of the context slot in the frame. It is immediately + // below the frame pointer. 
+ inline int context_index(); + + // The index of the function slot in the frame. It is below the frame + // pointer and context slot. + inline int function_index(); + + // The index of the first local. Between the frame pointer and the + // locals lies the return address. + inline int local0_index() const; + + // The index of the base of the expression stack. + inline int expression_base_index() const; + + // Convert a frame index into a frame pointer relative offset into the + // actual stack. + inline int fp_relative(int index); + + // Spill all elements in registers. Spill the top spilled_args elements + // on the frame. Sync all other frame elements. + // Then drop dropped_args elements from the virtual frame, to match + // the effect of an upcoming call that will drop them from the stack. + void PrepareForCall(int spilled_args, int dropped_args); + + // If all top-of-stack registers are in use then the lowest one is pushed + // onto the physical stack and made free. + void EnsureOneFreeTOSRegister(); + + // Emit instructions to get the top of stack state from where we are to where + // we want to be. + void MergeTOSTo(TopOfStack expected_state, Condition cond = al); + + inline bool Equals(const VirtualFrame* other); + + inline void LowerHeight(int count) { + element_count_ -= count; + if (count >= kTOSKnownSmiMapSize) { + tos_known_smi_map_ = 0; + } else { + tos_known_smi_map_ >>= count; + } + } + + inline void RaiseHeight(int count, unsigned known_smi_map = 0) { + ASSERT(count >= 32 || known_smi_map < (1u << count)); + element_count_ += count; + if (count >= kTOSKnownSmiMapSize) { + tos_known_smi_map_ = known_smi_map; + } else { + tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map); + } + } + + friend class JumpTarget; +}; + + +} } // namespace v8::internal + +#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_ |
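To summarize the register-allocating virtual frame reintroduced by these files: the top one or two expression-stack slots can live in r0 and r1, encoded in the five TopOfStack states, and EmitPush/Pop mostly just walk the kStateAfterPush/kStateAfterPop tables instead of touching memory. The table values below are copied from virtual-frame-arm.cc above; the small driver is only an illustration and is not part of the tree.

#include <cassert>

enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS, TOS_STATES };

// Values copied from the tables in virtual-frame-arm.cc.
const int kVirtualElements[TOS_STATES] = { 0, 1, 1, 2, 2 };
const TopOfStack kStateAfterPop[TOS_STATES] =
    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
const TopOfStack kStateAfterPush[TOS_STATES] =
    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };

int main() {
  TopOfStack state = NO_TOS_REGISTERS;
  state = kStateAfterPush[state];        // first push: value lives in r0 (R0_TOS)
  state = kStateAfterPush[state];        // second push: new top in r1 (R1_R0_TOS)
  assert(kVirtualElements[state] == 2);  // both elements are register-resident
  state = kStateAfterPop[state];         // pop the r1 top; r0 becomes top again
  assert(state == R0_TOS);
  // A third push from a two-register state spills the bottom register first
  // (EnsureOneFreeTOSRegister in the real code); this model stops here.
  return 0;
}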