// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ia32/lithium-codegen-ia32.h"
#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


static SaveFPRegsMode GetSaveFPRegsMode() {
  // We don't need to save floating point regs when generating the snapshot.
  return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
}


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set
  // up the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  CpuFeatureScope scope(masm(), SSE2);
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(esp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  CpuFeatureScope scope(masm(), SSE2);
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(esp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). ecx is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ test(ecx, Operand(ecx));
      __ j(zero, &ok, Label::kNear);
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(Operand(esp, receiver_offset),
             Immediate(isolate()->factory()->undefined_value()));
      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Set(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
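      // Rough sketch of the loop below: the return address, the receiver
      // and the parameters (num_parameters() + 2 words in total) each move
      // down one slot into the padding word just pushed, and the vacated
      // slot is tagged with kAlignmentZapValue so the padding can be
      // recognized later.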
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  ASSERT(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
      SaveCallerDoubles();
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both eax and esi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        __ RecordWriteContextSlot(esi,
                                  context_offset,
                                  eax,
                                  ebx,
                                  kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
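    // Runtime::kTraceEnter takes no arguments (hence argc == 0 below); it
    // just logs the function entry when --trace is on.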
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Set(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer,
    // return address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() + 5 +
                          graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (!CpuFeatures::IsSupported(SSE2)) {
    if (instr->IsGoto()) {
      x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
    } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
               !instr->IsGap() && !instr->IsReturn()) {
      if (instr->ClobbersDoubleRegisters()) {
        if (instr->HasDoubleRegisterResult()) {
          ASSERT_EQ(1, x87_stack_.depth());
        } else {
          ASSERT_EQ(0, x87_stack_.depth());
        }
      }
      __ VerifyX87StackDepth(x87_stack_.depth());
    }
  }
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
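        // Rough sketch of the mechanics: the words pushed so far and below
        // are [deopt entry][context][STUB marker][approx pc] (top last).
        // The code then copies the deopt entry to the top of the stack,
        // overwrites the original entry slot with the saved ebp, points
        // ebp at that slot, and uses ret to jump to the entry, leaving a
        // frame shaped like an ordinary stub frame.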
        ASSERT(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find
        // where the deopt comes from. It doesn't have to be the precise
        // return address of a "calling" LAZY deopt, it only has to be
        // somewhere inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
        RestoreCallerDoubles();
      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
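    // For example, with pc_offset() at 120 and patch_size() returning 7,
    // nops would be emitted until the offset reaches 127 (numbers purely
    // illustrative).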
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


X87Register LCodeGen::ToX87Register(int index) const {
  return X87Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  ASSERT(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  ASSERT(x87_stack_.Contains(reg1));
  ASSERT(x87_stack_.Contains(reg2));
  x87_stack_.Fxch(reg1, 1);
  x87_stack_.Fxch(reg2);
  x87_stack_.pop();
  x87_stack_.pop();
}


void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  ASSERT(is_mutable_);
  ASSERT(Contains(reg) && stack_depth_ > other_slot);
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i] = reg;
    stack_[i] = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}


int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Stack::Free(X87Register reg) {
  ASSERT(is_mutable_);
  ASSERT(Contains(reg));
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(i) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  ASSERT(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  ASSERT(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  ASSERT(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  ASSERT(is_mutable_);
  // Assert the reg is prepared to write, but not on the virtual stack yet.
  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  ASSERT(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush stack from tos down, since FreeX87() will mess with tos.
    for (int i = stack_depth_ - 1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
      if (i < stack_depth_ - 1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
  ASSERT(stack_depth_ <= 1);
  // If ever used for new stubs producing two pairs of doubles joined into
  // two phis this assert hits. That situation is not handled, since the
  // two stacks might have st0 and st1 swapped.
  if (current_block_id + 1 != goto_instr->block_id()) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support X87 registers. But as long as we
  // deopt from a stub it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  ASSERT(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }
  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  __ CallRuntime(fun, argc, save_doubles);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  ASSERT(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  ASSERT(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Before instructions which can deopt, we normally flush the x87 stack. But
  // we can have inputs or outputs of the current instruction on the stack,
  // thus we need to flush them here from the physical stack to leave it in a
  // consistent state.
  if (x87_stack_.depth() > 0) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    EmitFlushX87ForDeopt();
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
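  // For deopt index i this records the AST id to resume at, the index of
  // the frame translation, the arguments stack height, and the code offset
  // at which the deopt was registered (used for lazy deopts).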
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ neg(left_reg);
      __ and_(left_reg, divisor - 1);
      __ neg(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ and_(left_reg, divisor - 1);
    __ bind(&done);
  } else {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(eax));
    Register right_reg = ToRegister(instr->right());
    ASSERT(!right_reg.is(eax));
    ASSERT(!right_reg.is(edx));
    Register result_reg = ToRegister(instr->result());
    ASSERT(result_reg.is(edx));

    Label done;
    // Check for x % 0, idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ test(right_reg, Operand(right_reg));
      DeoptimizeIf(zero, instr->environment());
    }

    // Check for kMinInt % -1, idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmp(left_reg, kMinInt);
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ cmp(right_reg, -1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(equal, instr->environment());
      } else {
        __ j(not_equal, &no_overflow_possible, Label::kNear);
        __ Set(result_reg, Immediate(0));
        __ jmp(&done, Label::kNear);
      }
      __ bind(&no_overflow_possible);
    }

    // Sign extend dividend in eax into edx:eax.
    __ cdq();

    // If we care about -0, test if the dividend is <0 and the result is 0.
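    // Example: -4 % 2. idiv leaves remainder 0 in edx, but the correct JS
    // result is -0, which an int32 can't represent, so we must deopt when
    // the dividend is negative and the remainder is zero.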
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &positive_left, Label::kNear);
      __ idiv(right_reg);
      __ test(result_reg, Operand(result_reg));
      DeoptimizeIf(zero, instr->environment());
      __ jmp(&done, Label::kNear);
      __ bind(&positive_left);
    }
    __ idiv(right_reg);
    __ bind(&done);
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ test(dividend, Operand(dividend));
        DeoptimizeIf(zero, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, kMinInt);
        DeoptimizeIf(zero, instr->environment());
      }
      test_value = - divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        Label done, negative;
        __ cmp(dividend, 0);
        __ j(less, &negative, Label::kNear);
        __ sar(dividend, power);
        if (divisor < 0) __ neg(dividend);
        __ jmp(&done, Label::kNear);

        __ bind(&negative);
        __ neg(dividend);
        __ sar(dividend, power);
        if (divisor > 0) __ neg(dividend);
        __ bind(&done);
        return;  // Don't fall through to "__ neg" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ test(dividend, Immediate(test_value));
        DeoptimizeIf(not_zero, instr->environment());
        __ sar(dividend, power);
      }
    }

    if (divisor < 0) __ neg(dividend);

    return;
  }

  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  if (instr->is_flooring()) {
    Label done;
    __ test(edx, edx);
    __ j(zero, &done, Label::kNear);
    __ xor_(edx, right_reg);
    __ sar(edx, 31);
    __ add(eax, edx);
    __ bind(&done);
  } else if (!instr->hydrogen()->CheckFlag(
      HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
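    // Example: 7 / 2 leaves quotient 3 in eax and remainder 1 in edx; a
    // division whose uses don't all truncate must produce 3.5, so any
    // non-zero remainder forces a deopt back to generic code.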
    __ test(edx, Operand(edx));
    DeoptimizeIf(not_zero, instr->environment());
  }
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  Register result = ToRegister(instr->result());

  switch (divisor) {
    case 0:
      DeoptimizeIf(no_condition, instr->environment());
      return;

    case 1:
      __ Move(result, dividend);
      return;

    case -1:
      __ Move(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        DeoptimizeIf(overflow, instr->environment());
      }
      return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      // Input[dividend] is clobbered.
      // The sequence is tedious because neg(dividend) might overflow.
      __ mov(result, dividend);
      __ sar(dividend, 31);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ shl(dividend, 32 - power);
      __ sar(result, power);
      __ not_(dividend);
      // Clear result.sign if dividend.sign is set.
      __ and_(result, dividend);
    } else {
      __ Move(result, dividend);
      __ sar(result, power);
    }
  } else {
    ASSERT(ToRegister(instr->left()).is(eax));
    ASSERT(ToRegister(instr->result()).is(edx));
    Register scratch = ToRegister(instr->temp());

    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Precision + 1 bit (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
    __ mov(scratch, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(dividend, dividend);
      DeoptimizeIf(zero, instr->environment());
    }
    __ mov(edx, static_cast<int32_t>(multiplier));
    __ imul(edx);
    if (static_cast<int32_t>(multiplier) < 0) {
      __ add(edx, scratch);
    }
    Register reg_lo = eax;
    Register reg_byte_scratch = scratch;
    if (!reg_byte_scratch.is_byte_register()) {
      __ xchg(reg_lo, reg_byte_scratch);
      reg_lo = scratch;
      reg_byte_scratch = eax;
    }
    if (divisor < 0) {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0x40000000);
      __ setcc(above, reg_byte_scratch);
      __ neg(edx);
      __ sub(edx, reg_byte_scratch);
    } else {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0xC0000000);
      __ setcc(above_equal, reg_byte_scratch);
      __ add(edx, reg_byte_scratch);
    }
    __ sar(edx, shift - 32);
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
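    // For example, x * 3 becomes lea(left, [left + left*2]) and x * 4
    // becomes shl(left, 2); both are cheaper than imul, and lea in
    // addition leaves the flags untouched.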
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        }
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr->environment());
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  int32_t lower = static_cast<int32_t>(int_val);
  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
  ASSERT(instr->result()->IsDoubleRegister());

  if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
    __ push(Immediate(upper));
    __ push(Immediate(lower));
    X87Register reg = ToX87Register(instr->result());
    X87Mov(reg, Operand(esp, 0));
    __ add(Operand(esp), Immediate(kDoubleSize));
  } else {
    CpuFeatureScope scope1(masm(), SSE2);
    XMMRegister res = ToDoubleRegister(instr->result());
    if (int_val == 0) {
      __ xorps(res, res);
    } else {
      Register temp = ToRegister(instr->temp());
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope2(masm(), SSE4_1);
        if (lower != 0) {
          __ Set(temp, Immediate(lower));
          __ movd(res, Operand(temp));
          __ Set(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        } else {
          __ xorps(res, res);
          __ Set(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        }
      } else {
        __ Set(temp, Immediate(upper));
        __ movd(res, Operand(temp));
        __ psllq(res, 32);
        if (lower != 0) {
          XMMRegister xmm_scratch = double_scratch0();
          __ Set(temp, Immediate(lower));
          __ movd(xmm_scratch, Operand(temp));
          __ orps(res, xmm_scratch);
        }
      }
    }
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> handle = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, handle);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following masking takes care of that anyway.
  __ mov(result, FieldOperand(result, Map::kBitField2Offset));

  // Retrieve elements_kind from bit field 2.
  __ and_(result, Map::kElementsKindMask);
  __ shr(result, Map::kElementsKindShift);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  ASSERT(input.is(result));

  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi return the object.
    __ JumpIfSmi(input, &done, Label::kNear);
  }

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done, Label::kNear);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(eax));

  __ test(object, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand::StaticVariable(stamp));
      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToRepresentation(LConstantOperand::cast(index),
                                  Representation::Integer32());
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ push(string);
    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
one_byte_seq_type : two_byte_seq_type)); __ Check(equal, kUnexpectedStringType); __ pop(string); } Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); if (encoding == String::ONE_BYTE_ENCODING) { __ movzx_b(result, operand); } else { __ movzx_w(result, operand); } } void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { String::Encoding encoding = instr->hydrogen()->encoding(); Register string = ToRegister(instr->string()); if (FLAG_debug_code) { Register value = ToRegister(instr->value()); Register index = ToRegister(instr->index()); static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; int encoding_mask = instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING ? one_byte_seq_type : two_byte_seq_type; __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); } Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); if (instr->value()->IsConstantOperand()) { int value = ToRepresentation(LConstantOperand::cast(instr->value()), Representation::Integer32()); ASSERT_LE(0, value); if (encoding == String::ONE_BYTE_ENCODING) { ASSERT_LE(value, String::kMaxOneByteCharCode); __ mov_b(operand, static_cast(value)); } else { ASSERT_LE(value, String::kMaxUtf16CodeUnit); __ mov_w(operand, static_cast(value)); } } else { Register value = ToRegister(instr->value()); if (encoding == String::ONE_BYTE_ENCODING) { __ mov_b(operand, value); } else { __ mov_w(operand, value); } } } void LCodeGen::DoThrow(LThrow* instr) { __ push(ToOperand(instr->value())); ASSERT(ToRegister(instr->context()).is(esi)); CallRuntime(Runtime::kThrow, 1, instr); if (FLAG_debug_code) { Comment("Unreachable code."); __ int3(); } } void LCodeGen::DoAddI(LAddI* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { if (right->IsConstantOperand()) { int32_t offset = ToRepresentation(LConstantOperand::cast(right), instr->hydrogen()->representation()); __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); } else { Operand address(ToRegister(left), ToRegister(right), times_1, 0); __ lea(ToRegister(instr->result()), address); } } else { if (right->IsConstantOperand()) { __ add(ToOperand(left), ToImmediate(right, instr->hydrogen()->representation())); } else { __ add(ToRegister(left), ToOperand(right)); } if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { DeoptimizeIf(overflow, instr->environment()); } } } void LCodeGen::DoMathMinMax(LMathMinMax* instr) { CpuFeatureScope scope(masm(), SSE2); LOperand* left = instr->left(); LOperand* right = instr->right(); ASSERT(left->Equals(instr->result())); HMathMinMax::Operation operation = instr->hydrogen()->operation(); if (instr->hydrogen()->representation().IsSmiOrInteger32()) { Label return_left; Condition condition = (operation == HMathMinMax::kMathMin) ? 
less_equal : greater_equal; if (right->IsConstantOperand()) { Operand left_op = ToOperand(left); Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), instr->hydrogen()->representation()); __ cmp(left_op, immediate); __ j(condition, &return_left, Label::kNear); __ mov(left_op, immediate); } else { Register left_reg = ToRegister(left); Operand right_op = ToOperand(right); __ cmp(left_reg, right_op); __ j(condition, &return_left, Label::kNear); __ mov(left_reg, right_op); } __ bind(&return_left); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); Label check_nan_left, check_zero, return_left, return_right; Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; XMMRegister left_reg = ToDoubleRegister(left); XMMRegister right_reg = ToDoubleRegister(right); __ ucomisd(left_reg, right_reg); __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. __ j(equal, &check_zero, Label::kNear); // left == right. __ j(condition, &return_left, Label::kNear); __ jmp(&return_right, Label::kNear); __ bind(&check_zero); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(left_reg, xmm_scratch); __ j(not_equal, &return_left, Label::kNear); // left == right != 0. // At this point, both left and right are either 0 or -0. if (operation == HMathMinMax::kMathMin) { __ orpd(left_reg, right_reg); } else { // Since we operate on +0 and/or -0, addsd and andsd have the same effect. __ addsd(left_reg, right_reg); } __ jmp(&return_left, Label::kNear); __ bind(&check_nan_left); __ ucomisd(left_reg, left_reg); // NaN check. __ j(parity_even, &return_left, Label::kNear); // left == NaN. __ bind(&return_right); __ movaps(left_reg, right_reg); __ bind(&return_left); } } void LCodeGen::DoArithmeticD(LArithmeticD* instr) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister left = ToDoubleRegister(instr->left()); XMMRegister right = ToDoubleRegister(instr->right()); XMMRegister result = ToDoubleRegister(instr->result()); switch (instr->op()) { case Token::ADD: __ addsd(left, right); break; case Token::SUB: __ subsd(left, right); break; case Token::MUL: __ mulsd(left, right); break; case Token::DIV: __ divsd(left, right); // Don't delete this mov. It may improve performance on some CPUs, // when there is a mulsd depending on the result __ movaps(left, left); break; case Token::MOD: { // Pass two doubles as arguments on the stack. __ PrepareCallCFunction(4, eax); __ movsd(Operand(esp, 0 * kDoubleSize), left); __ movsd(Operand(esp, 1 * kDoubleSize), right); __ CallCFunction( ExternalReference::double_fp_operation(Token::MOD, isolate()), 4); // Return value is in st(0) on ia32. // Store it into the result register. __ sub(Operand(esp), Immediate(kDoubleSize)); __ fstp_d(Operand(esp, 0)); __ movsd(result, Operand(esp, 0)); __ add(Operand(esp), Immediate(kDoubleSize)); break; } default: UNREACHABLE(); break; } } else { X87Register left = ToX87Register(instr->left()); X87Register right = ToX87Register(instr->right()); X87Register result = ToX87Register(instr->result()); if (instr->op() != Token::MOD) { X87PrepareBinaryOp(left, right, result); } switch (instr->op()) { case Token::ADD: __ fadd_i(1); break; case Token::SUB: __ fsub_i(1); break; case Token::MUL: __ fmul_i(1); break; case Token::DIV: __ fdiv_i(1); break; case Token::MOD: { // Pass two doubles as arguments on the stack. 
__ PrepareCallCFunction(4, eax); X87Mov(Operand(esp, 1 * kDoubleSize), right); X87Mov(Operand(esp, 0), left); X87Free(right); ASSERT(left.is(result)); X87PrepareToWrite(result); __ CallCFunction( ExternalReference::double_fp_operation(Token::MOD, isolate()), 4); // Return value is in st(0) on ia32. X87CommitWrite(result); break; } default: UNREACHABLE(); break; } } } void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->left()).is(edx)); ASSERT(ToRegister(instr->right()).is(eax)); ASSERT(ToRegister(instr->result()).is(eax)); BinaryOpICStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ nop(); // Signals no inlined code. } template void LCodeGen::EmitBranch(InstrType instr, Condition cc) { int left_block = instr->TrueDestination(chunk_); int right_block = instr->FalseDestination(chunk_); int next_block = GetNextEmittedBlock(); if (right_block == left_block || cc == no_condition) { EmitGoto(left_block); } else if (left_block == next_block) { __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); } else if (right_block == next_block) { __ j(cc, chunk_->GetAssemblyLabel(left_block)); } else { __ j(cc, chunk_->GetAssemblyLabel(left_block)); __ jmp(chunk_->GetAssemblyLabel(right_block)); } } template void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { int false_block = instr->FalseDestination(chunk_); if (cc == no_condition) { __ jmp(chunk_->GetAssemblyLabel(false_block)); } else { __ j(cc, chunk_->GetAssemblyLabel(false_block)); } } void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsSmiOrInteger32()) { Register reg = ToRegister(instr->value()); __ test(reg, Operand(reg)); EmitBranch(instr, not_zero); } else if (r.IsDouble()) { ASSERT(!info()->IsStub()); CpuFeatureScope scope(masm(), SSE2); XMMRegister reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(reg, xmm_scratch); EmitBranch(instr, not_equal); } else { ASSERT(r.IsTagged()); Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { ASSERT(!info()->IsStub()); __ cmp(reg, factory()->true_value()); EmitBranch(instr, equal); } else if (type.IsSmi()) { ASSERT(!info()->IsStub()); __ test(reg, Operand(reg)); EmitBranch(instr, not_equal); } else if (type.IsJSArray()) { ASSERT(!info()->IsStub()); EmitBranch(instr, no_condition); } else if (type.IsHeapNumber()) { ASSERT(!info()->IsStub()); CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); EmitBranch(instr, not_equal); } else if (type.IsString()) { ASSERT(!info()->IsStub()); __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); EmitBranch(instr, not_equal); } else { ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); if (expected.Contains(ToBooleanStub::UNDEFINED)) { // undefined -> false. __ cmp(reg, factory()->undefined_value()); __ j(equal, instr->FalseLabel(chunk_)); } if (expected.Contains(ToBooleanStub::BOOLEAN)) { // true -> true. __ cmp(reg, factory()->true_value()); __ j(equal, instr->TrueLabel(chunk_)); // false -> false. 
__ cmp(reg, factory()->false_value()); __ j(equal, instr->FalseLabel(chunk_)); } if (expected.Contains(ToBooleanStub::NULL_TYPE)) { // 'null' -> false. __ cmp(reg, factory()->null_value()); __ j(equal, instr->FalseLabel(chunk_)); } if (expected.Contains(ToBooleanStub::SMI)) { // Smis: 0 -> false, all other -> true. __ test(reg, Operand(reg)); __ j(equal, instr->FalseLabel(chunk_)); __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); } else if (expected.NeedsMap()) { // If we need a map later and have a Smi -> deopt. __ test(reg, Immediate(kSmiTagMask)); DeoptimizeIf(zero, instr->environment()); } Register map = no_reg; // Keep the compiler happy. if (expected.NeedsMap()) { map = ToRegister(instr->temp()); ASSERT(!map.is(reg)); __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); if (expected.CanBeUndetectable()) { // Undetectable -> false. __ test_b(FieldOperand(map, Map::kBitFieldOffset), 1 << Map::kIsUndetectable); __ j(not_zero, instr->FalseLabel(chunk_)); } } if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { // spec object -> true. __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); __ j(above_equal, instr->TrueLabel(chunk_)); } if (expected.Contains(ToBooleanStub::STRING)) { // String value -> false iff empty. Label not_string; __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); __ j(above_equal, ¬_string, Label::kNear); __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); __ j(not_zero, instr->TrueLabel(chunk_)); __ jmp(instr->FalseLabel(chunk_)); __ bind(¬_string); } if (expected.Contains(ToBooleanStub::SYMBOL)) { // Symbol value -> true. __ CmpInstanceType(map, SYMBOL_TYPE); __ j(equal, instr->TrueLabel(chunk_)); } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { // heap number -> false iff +0, -0, or NaN. Label not_heap_number; __ cmp(FieldOperand(reg, HeapObject::kMapOffset), factory()->heap_number_map()); __ j(not_equal, ¬_heap_number, Label::kNear); if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); } else { __ fldz(); __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); __ FCmp(); } __ j(zero, instr->FalseLabel(chunk_)); __ jmp(instr->TrueLabel(chunk_)); __ bind(¬_heap_number); } if (!expected.IsGeneric()) { // We've seen something for the first time -> deopt. // This can only happen if we are not generic already. DeoptimizeIf(no_condition, instr->environment()); } } } } void LCodeGen::EmitGoto(int block) { if (!IsNextEmittedBlock(block)) { __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); } } void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { } void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { Condition cond = no_condition; switch (op) { case Token::EQ: case Token::EQ_STRICT: cond = equal; break; case Token::NE: case Token::NE_STRICT: cond = not_equal; break; case Token::LT: cond = is_unsigned ? below : less; break; case Token::GT: cond = is_unsigned ? above : greater; break; case Token::LTE: cond = is_unsigned ? below_equal : less_equal; break; case Token::GTE: cond = is_unsigned ? 
above_equal : greater_equal; break; case Token::IN: case Token::INSTANCEOF: default: UNREACHABLE(); } return cond; } void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); Condition cc = TokenToCondition(instr->op(), instr->is_double()); if (left->IsConstantOperand() && right->IsConstantOperand()) { // We can statically evaluate the comparison. double left_val = ToDouble(LConstantOperand::cast(left)); double right_val = ToDouble(LConstantOperand::cast(right)); int next_block = EvalComparison(instr->op(), left_val, right_val) ? instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); EmitGoto(next_block); } else { if (instr->is_double()) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); } else { X87LoadForUsage(ToX87Register(right), ToX87Register(left)); __ FCmp(); } // Don't base result on EFLAGS when a NaN is involved. Instead // jump to the false block. __ j(parity_even, instr->FalseLabel(chunk_)); } else { if (right->IsConstantOperand()) { __ cmp(ToOperand(left), ToImmediate(right, instr->hydrogen()->representation())); } else if (left->IsConstantOperand()) { __ cmp(ToOperand(right), ToImmediate(left, instr->hydrogen()->representation())); // We transposed the operands. Reverse the condition. cc = ReverseCondition(cc); } else { __ cmp(ToRegister(left), ToOperand(right)); } } EmitBranch(instr, cc); } } void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { Register left = ToRegister(instr->left()); if (instr->right()->IsConstantOperand()) { Handle right = ToHandle(LConstantOperand::cast(instr->right())); __ CmpObject(left, right); } else { Operand right = ToOperand(instr->right()); __ cmp(left, right); } EmitBranch(instr, equal); } void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { if (instr->hydrogen()->representation().IsTagged()) { Register input_reg = ToRegister(instr->object()); __ cmp(input_reg, factory()->the_hole_value()); EmitBranch(instr, equal); return; } bool use_sse2 = CpuFeatures::IsSupported(SSE2); if (use_sse2) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->object()); __ ucomisd(input_reg, input_reg); EmitFalseBranch(instr, parity_odd); } else { // Put the value to the top of stack X87Register src = ToX87Register(instr->object()); X87LoadForUsage(src); __ fld(0); __ fld(0); __ FCmp(); Label ok; __ j(parity_even, &ok, Label::kNear); __ fstp(0); EmitFalseBranch(instr, no_condition); __ bind(&ok); } __ sub(esp, Immediate(kDoubleSize)); if (use_sse2) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->object()); __ movsd(MemOperand(esp, 0), input_reg); } else { __ fstp_d(MemOperand(esp, 0)); } __ add(esp, Immediate(kDoubleSize)); int offset = sizeof(kHoleNanUpper32); __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); EmitBranch(instr, equal); } void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { Representation rep = instr->hydrogen()->value()->representation(); ASSERT(!rep.IsInteger32()); Register scratch = ToRegister(instr->temp()); if (rep.IsDouble()) { CpuFeatureScope use_sse2(masm(), SSE2); XMMRegister value = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(xmm_scratch, value); EmitFalseBranch(instr, not_equal); __ movmskpd(scratch, value); __ test(scratch, Immediate(1)); 
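    // Editorial note (added; not in the original source): movmskpd copies
    // the sign bits of the two double lanes into the low two bits of
    // |scratch|. The ucomisd above has already established value == 0.0,
    // so testing bit 0 is what distinguishes -0.0 (sign bit set) from +0.0.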
    EmitBranch(instr, not_zero);
  } else {
    Register value = ToRegister(instr->value());
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
           Immediate(0x80000000));
    EmitFalseBranch(instr, not_equal);
    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
           Immediate(0x00000000));
    EmitBranch(instr, equal);
  }
}

Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ cmp(input, isolate()->factory()->null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, is_not_object);

  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
  return below_equal;
}

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  Condition true_cond = EmitIsObject(
      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
                              ? OMIT_SMI_CHECK
                              : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(instr, not_zero);
}

static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}

void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  __ test(eax, Operand(eax));

  EmitBranch(instr, condition);
}

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}

static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}

// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
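    // Added sketch of the assumed instance-type layout (derived from the
    // STATIC_ASSERTs below, not independently verified): the two callable
    // types sit at the very ends of the spec-object range,
    //
    //   FIRST_SPEC_OBJECT_TYPE               <- callable
    //   FIRST_NONCALLABLE_SPEC_OBJECT_TYPE   == FIRST_SPEC_OBJECT_TYPE + 1
    //   ...
    //   LAST_NONCALLABLE_SPEC_OBJECT_TYPE    == LAST_SPEC_OBJECT_TYPE - 1
    //   LAST_SPEC_OBJECT_TYPE                <- callable, == LAST_TYPE
    //
    // so one compare against FIRST_SPEC_OBJECT_TYPE (below -> not an object,
    // equal -> callable) plus an equality check on LAST_SPEC_OBJECT_TYPE
    // answers both questions at once.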
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}

void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  // Object and function are in fixed registers defined by the stub.
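  // Added note (an editor's reading of the code below, not authoritative):
  // the stub's return convention is that eax == 0 means "is an instance",
  // which is why the zero test below selects the true_value branch.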
  ASSERT(ToRegister(instr->context()).is(esi));
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}

void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr,
                                  const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result, Label::kNear);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = ToRegister(instr->temp());
  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
  __ j(not_equal, &cache_miss, Label::kNear);
  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
  __ jmp(&done, Label::kNear);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ cmp(object, factory()->null_value());
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
  __ j(is_string, &false_result, Label::kNear);

  // Go to the deferred code.
  __ jmp(deferred->entry());

  __ bind(&false_result);
  __ mov(ToRegister(instr->result()), factory()->false_value());

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  PushSafepointRegistersScope scope(this);

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  // Get the temp register reserved by the instruction. This needs to be a
  // register which is pushed last by PushSafepointRegisters as top of the
  // stack is used to pass the offset to the location of the map check to
  // the stub.
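  // Added note (an editor's reading, not authoritative): the stub reads the
  // offset stored in this safepoint slot to locate the patchable map-check
  // site at |map_check|, so |delta| must count every byte emitted between
  // the label and the call; kAdditionalDelta compensates for the mov and
  // StoreToSafepointRegisterSlot emitted after the delta is measured.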
  Register temp = ToRegister(instr->temp());
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 13;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Get the deoptimization index of the LLazyBailout-environment that
  // corresponds to this instruction.
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
}

void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}

void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
  int extra_value_count = dynamic_frame_alignment ? 2 : 1;

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      __ cmp(Operand(esp,
                     (parameter_count + extra_value_count) * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      ASSERT(extra_value_count == 2);
      __ cmp(Operand(esp, reg, times_pointer_size,
                     extra_value_count * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }

    // Emit code to restore the stack based on instr->parameter_count().
    __ pop(return_addr_reg);  // Save return address.
    if (dynamic_frame_alignment) {
      __ inc(reg);  // 1 more for alignment.
    }
    __ shl(reg, kPointerSizeLog2);
    __ add(esp, reg);
    __ jmp(return_addr_reg);
  }
}

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
    RestoreCallerDoubles();
  }
  if (dynamic_frame_alignment_) {
    // Fetch the state of the dynamic frame alignment.
    __ mov(edx, Operand(ebp,
        JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(esp, ebp);
    __ pop(ebp);
    no_frame_start = masm_->pc_offset();
  }
  if (dynamic_frame_alignment_) {
    Label no_padding;
    __ cmp(edx, Immediate(kNoAlignmentPadding));
    __ j(equal, &no_padding, Label::kNear);

    EmitReturn(instr, true);
    __ bind(&no_padding);
  }

  EmitReturn(instr, false);
  if (no_frame_start != -1) {
    info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }

  // Store the value.
  __ mov(Operand::ForCell(cell_handle), value);
  // Cells are always rescanned, so no write barrier here.
}

void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              temp,
                              GetSaveFPRegsMode(),
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(ToExternalReference(
              LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    __ Load(result, operand, access.representation());
    return;
  }

  Register object = ToRegister(instr->object());
  if (FLAG_track_double_fields &&
      instr->hydrogen()->representation().IsDouble()) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister result = ToDoubleRegister(instr->result());
      __ movsd(result, FieldOperand(object, offset));
    } else {
      X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
    }
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  __ Load(result, FieldOperand(object, offset), access.representation());
}

void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    AllowDeferredHandleDereference smi_check;
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
            1 << Map::kHasNonInstancePrototype);
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}

void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}

void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  __ mov(result, FieldOperand(input,
                              ExternalArray::kExternalPointerOffset));
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting index from length accounts for one of them; the extra
    // kPointerSize displacement below adds the other.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}

void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister result(ToDoubleRegister(instr->result()));
      __ movss(result, operand);
      __ cvtss2sd(result, result);
    } else {
      X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
    }
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      __ movsd(ToDoubleRegister(instr->result()), operand);
    } else {
      X87Mov(ToX87Register(instr->result()), operand);
    }
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ mov(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, double_load_operand);
  } else {
    X87Mov(ToX87Register(instr->result()), double_load_operand);
  }
}

void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(),
                               instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS,
                               FixedArray::kHeaderSize - kHeapObjectTag,
                               instr->additional_index()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr->environment());
    }
  }
}

void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}

Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    // Take the tag bit into account while computing the shift size.
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << element_shift_size));
  }
}

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}

void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = ToRegister(instr->temp());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ mov(scratch,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
  __ j(not_equal, &receiver_ok, dist);

  // Do not transform the receiver to object for builtins.
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
            1 << SharedFunctionInfo::kNativeBitWithinByte);
  __ j(not_equal, &receiver_ok, dist);

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object. See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
  __ mov(receiver,
         FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(eax));  // Used for parameter count.
  ASSERT(function.is(edi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
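  // Added note: the copy loop below pushes one 4-byte slot per argument, so
  // the count is bounded up front; kArgumentsLimit of 1K arguments caps the
  // pushed block at 4 KB of stack (the exact rationale for the limit is not
  // stated in the source).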
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}

void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}

void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}

void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}

void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}

void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    ASSERT(result.is(esi));
  }
}

void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}

void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}

void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}

void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 EDIState edi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  if (can_invoke_directly) {
    if (edi_state == EDI_UNINITIALIZED) {
      __ LoadHeapObject(edi, function);
    }

    // Change context.
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

    // Set eax to arguments count if adaptation is not needed. Assumes that
    // eax is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(eax, arity);
    }

    // Invoke function directly.
    __ SetCallKind(ecx, call_kind);
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
    }
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // We need to adapt arguments.
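    // Added note: i.e. the static arity recorded in the SharedFunctionInfo
    // differs from the actual argument count (and adaptation has not been
    // opted out of via kDontAdaptArgumentsSentinel), so the call must go
    // through the arguments adaptor machinery that InvokeFunction emits;
    // the direct call above is only valid when the counts match.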
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }
}

void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    EDI_UNINITIALIZED);
}

void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}

void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}

void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr,
                                    const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  CpuFeatureScope scope(masm(), SSE2);
  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
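    // Added note: a tagged value here is either a smi or a heap object.
    // Smis take the inline integer path below; everything else goes to the
    // deferred code, which deoptimizes unless the object is a heap number.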
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoMathFloor(LMathFloor* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ Cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}

void LCodeGen::DoMathRound(LMathRound* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  Label done, round_to_zero, below_one_half, do_not_compensate;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, Operand(xmm_scratch));
  // Overflow is signalled with minint.
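  // Added note: cvttsd2si produces the "integer indefinite" value
  // 0x80000000 (kMinInt) for NaN and for any input outside the int32
  // range, so comparing against that sentinel catches every failed
  // conversion, at the cost of also bailing out on a legitimate result of
  // exactly kMinInt.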
__ cmp(output_reg, 0x80000000u); __ RecordComment("D2I conversion overflow"); DeoptimizeIf(equal, instr->environment()); __ jmp(&done, dist); __ bind(&below_one_half); __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); __ ucomisd(xmm_scratch, input_reg); __ j(below_equal, &round_to_zero, Label::kNear); // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then // compare and compensate. __ movaps(input_temp, input_reg); // Do not alter input_reg. __ subsd(input_temp, xmm_scratch); __ cvttsd2si(output_reg, Operand(input_temp)); // Catch minint due to overflow, and to prevent overflow when compensating. __ cmp(output_reg, 0x80000000u); __ RecordComment("D2I conversion overflow"); DeoptimizeIf(equal, instr->environment()); __ Cvtsi2sd(xmm_scratch, output_reg); __ ucomisd(xmm_scratch, input_temp); __ j(equal, &done, dist); __ sub(output_reg, Immediate(1)); // No overflow because we already ruled out minint. __ jmp(&done, dist); __ bind(&round_to_zero); // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if // we can ignore the difference between a result of -0 and +0. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // If the sign is positive, we return +0. __ movmskpd(output_reg, input_reg); __ test(output_reg, Immediate(1)); __ RecordComment("Minus zero"); DeoptimizeIf(not_zero, instr->environment()); } __ Set(output_reg, Immediate(0)); __ bind(&done); } void LCodeGen::DoMathSqrt(LMathSqrt* instr) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); __ sqrtsd(input_reg, input_reg); } void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = ToRegister(instr->temp()); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity // Math.sqrt(-Infinity) == NaN Label done, sqrt; // Check base for -Infinity. According to IEEE-754, single-precision // -Infinity has the highest 9 bits set and the lowest 23 bits cleared. __ mov(scratch, 0xFF800000); __ movd(xmm_scratch, scratch); __ cvtss2sd(xmm_scratch, xmm_scratch); __ ucomisd(input_reg, xmm_scratch); // Comparing -Infinity with NaN results in "unordered", which sets the // zero flag as if both were equal. However, it also sets the carry flag. __ j(not_equal, &sqrt, Label::kNear); __ j(carry, &sqrt, Label::kNear); // If input is -Infinity, return Infinity. __ xorps(input_reg, input_reg); __ subsd(input_reg, xmm_scratch); __ jmp(&done, Label::kNear); // Square root. __ bind(&sqrt); __ xorps(xmm_scratch, xmm_scratch); __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. __ sqrtsd(input_reg, input_reg); __ bind(&done); } void LCodeGen::DoPower(LPower* instr) { Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
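// Expected register assignment, matching the MathPowStub convention
// asserted below: base in xmm2, exponent in xmm1 (double) or eax
// (tagged), result in xmm3.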
ASSERT(!instr->right()->IsDoubleRegister() || ToDoubleRegister(instr->right()).is(xmm1)); ASSERT(!instr->right()->IsRegister() || ToRegister(instr->right()).is(eax)); ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); if (exponent_type.IsSmi()) { MathPowStub stub(MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsTagged()) { Label no_deopt; __ JumpIfSmi(eax, &no_deopt); __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); DeoptimizeIf(not_equal, instr->environment()); __ bind(&no_deopt); MathPowStub stub(MathPowStub::TAGGED); __ CallStub(&stub); } else if (exponent_type.IsInteger32()) { MathPowStub stub(MathPowStub::INTEGER); __ CallStub(&stub); } else { ASSERT(exponent_type.IsDouble()); MathPowStub stub(MathPowStub::DOUBLE); __ CallStub(&stub); } } void LCodeGen::DoMathLog(LMathLog* instr) { CpuFeatureScope scope(masm(), SSE2); ASSERT(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); XMMRegister xmm_scratch = double_scratch0(); Label positive, done, zero; __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(input_reg, xmm_scratch); __ j(above, &positive, Label::kNear); __ j(equal, &zero, Label::kNear); ExternalReference nan = ExternalReference::address_of_canonical_non_hole_nan(); __ movsd(input_reg, Operand::StaticVariable(nan)); __ jmp(&done, Label::kNear); __ bind(&zero); ExternalReference ninf = ExternalReference::address_of_negative_infinity(); __ movsd(input_reg, Operand::StaticVariable(ninf)); __ jmp(&done, Label::kNear); __ bind(&positive); __ fldln2(); __ sub(Operand(esp), Immediate(kDoubleSize)); __ movsd(Operand(esp, 0), input_reg); __ fld_d(Operand(esp, 0)); __ fyl2x(); __ fstp_d(Operand(esp, 0)); __ movsd(input_reg, Operand(esp, 0)); __ add(Operand(esp), Immediate(kDoubleSize)); __ bind(&done); } void LCodeGen::DoMathExp(LMathExp* instr) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input = ToDoubleRegister(instr->value()); XMMRegister result = ToDoubleRegister(instr->result()); XMMRegister temp0 = double_scratch0(); Register temp1 = ToRegister(instr->temp1()); Register temp2 = ToRegister(instr->temp2()); MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); } void LCodeGen::DoMathTan(LMathTan* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); // Set the context register to a GC-safe fake value. Clobbering it is // OK because this instruction is marked as a call. __ Set(esi, Immediate(0)); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoMathCos(LMathCos* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); // Set the context register to a GC-safe fake value. Clobbering it is // OK because this instruction is marked as a call. __ Set(esi, Immediate(0)); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoMathSin(LMathSin* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); // Set the context register to a GC-safe fake value. Clobbering it is // OK because this instruction is marked as a call. 
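// Immediate(0) is the smi encoding of zero (low tag bit clear), so a GC
// that scans the frame treats esi as a smi rather than a stale heap
// pointer.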
__ Set(esi, Immediate(0)); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->function()).is(edi)); ASSERT(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount count(instr->arity()); __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD); } else { CallKnownFunction(known_function, instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, EDI_CONTAINS_TARGET); } } void LCodeGen::DoCallKeyed(LCallKeyed* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->key()).is(ecx)); ASSERT(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); CallCode(ic, RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoCallNamed(LCallNamed* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); RelocInfo::Mode mode = RelocInfo::CODE_TARGET; Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(arity, mode); __ mov(ecx, instr->name()); CallCode(ic, mode, instr); } void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->function()).is(edi)); ASSERT(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); if (instr->hydrogen()->IsTailCall()) { if (NeedsEagerFrame()) __ leave(); __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); } else { CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } void LCodeGen::DoCallGlobal(LCallGlobal* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(arity, mode); __ mov(ecx, instr->name()); CallCode(ic, mode, instr); } void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(eax)); CallKnownFunction(instr->hydrogen()->target(), instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_FUNCTION, EDI_UNINITIALIZED); } void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->constructor()).is(edi)); ASSERT(ToRegister(instr->result()).is(eax)); // No cell in ebx for construct type feedback in optimized code. Handle<Object> undefined_value(isolate()->factory()->undefined_value()); __ mov(ebx, Immediate(undefined_value)); CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ Set(eax, Immediate(instr->arity())); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } void LCodeGen::DoCallNewArray(LCallNewArray* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->constructor()).is(edi)); ASSERT(ToRegister(instr->result()).is(eax)); __ Set(eax, Immediate(instr->arity())); __ mov(ebx, instr->hydrogen()->property_cell()); ElementsKind kind = instr->hydrogen()->elements_kind(); AllocationSiteOverrideMode override_mode = (AllocationSite::GetMode(kind) ==
TRACK_ALLOCATION_SITE) ? DISABLE_ALLOCATION_SITES : DONT_OVERRIDE; ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED; if (instr->arity() == 0) { ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { Label done; if (IsFastPackedElementsKind(kind)) { Label packed_case; // We might need to transition to a holey kind here; look at the first argument. __ mov(ecx, Operand(esp, 0)); __ test(ecx, ecx); __ j(zero, &packed_case, Label::kNear); ElementsKind holey_kind = GetHoleyElementsKind(kind); ArraySingleArgumentConstructorStub stub(holey_kind, context_mode, override_mode); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); __ jmp(&done, Label::kNear); __ bind(&packed_case); } ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); __ bind(&done); } else { ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } } void LCodeGen::DoCallRuntime(LCallRuntime* instr) { ASSERT(ToRegister(instr->context()).is(esi)); CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); } void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { Register function = ToRegister(instr->function()); Register code_object = ToRegister(instr->code_object()); __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); } void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { Register result = ToRegister(instr->result()); Register base = ToRegister(instr->base_object()); if (instr->offset()->IsConstantOperand()) { LConstantOperand* offset = LConstantOperand::cast(instr->offset()); __ lea(result, Operand(base, ToInteger32(offset))); } else { Register offset = ToRegister(instr->offset()); __ lea(result, Operand(base, offset, times_1, 0)); } } void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Representation representation = instr->representation(); HObjectAccess access = instr->hydrogen()->access(); int offset = access.offset(); if (access.IsExternalMemory()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); MemOperand operand = instr->object()->IsConstantOperand() ?
MemOperand::StaticVariable( ToExternalReference(LConstantOperand::cast(instr->object()))) : MemOperand(ToRegister(instr->object()), offset); if (instr->value()->IsConstantOperand()) { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); __ mov(operand, Immediate(ToInteger32(operand_value))); } else { Register value = ToRegister(instr->value()); __ Store(value, operand, representation); } return; } Register object = ToRegister(instr->object()); Handle<Map> transition = instr->transition(); if (FLAG_track_fields && representation.IsSmi()) { if (instr->value()->IsConstantOperand()) { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); if (!IsSmi(operand_value)) { DeoptimizeIf(no_condition, instr->environment()); } } } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { if (instr->value()->IsConstantOperand()) { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); if (IsInteger32(operand_value)) { DeoptimizeIf(no_condition, instr->environment()); } } else { if (!instr->hydrogen()->value()->type().IsHeapObject()) { Register value = ToRegister(instr->value()); __ test(value, Immediate(kSmiTagMask)); DeoptimizeIf(zero, instr->environment()); } } } else if (FLAG_track_double_fields && representation.IsDouble()) { ASSERT(transition.is_null()); ASSERT(access.IsInobject()); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister value = ToDoubleRegister(instr->value()); __ movsd(FieldOperand(object, offset), value); } else { X87Register value = ToX87Register(instr->value()); X87Mov(FieldOperand(object, offset), value); } return; } if (!transition.is_null()) { if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); } else { Register temp = ToRegister(instr->temp()); Register temp_map = ToRegister(instr->temp_map()); __ mov(temp_map, transition); __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); // Update the write barrier for the map field. __ RecordWriteField(object, HeapObject::kMapOffset, temp_map, temp, GetSaveFPRegsMode(), OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); } } // Do the store. SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; Register write_register = object; if (!access.IsInobject()) { write_register = ToRegister(instr->temp()); __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); } MemOperand operand = FieldOperand(write_register, offset); if (instr->value()->IsConstantOperand()) { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); if (operand_value->IsRegister()) { Register value = ToRegister(operand_value); __ Store(value, operand, representation); } else if (representation.IsInteger32()) { Immediate immediate = ToImmediate(operand_value, representation); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); __ mov(operand, immediate); } else { Handle<Object> handle_value = ToHandle(operand_value); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); __ mov(operand, handle_value); } } else { Register value = ToRegister(instr->value()); __ Store(value, operand, representation); } if (instr->hydrogen()->NeedsWriteBarrier()) { Register value = ToRegister(instr->value()); Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; // Update the write barrier for the object for in-object properties.
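// RecordWriteField implements the generational write barrier: when a
// new-space pointer is stored into an old-space object, the slot's page
// is registered in the store buffer so the next scavenge can find it.
// The smi check is elided when the value is statically known to be a
// heap object.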
__ RecordWriteField(write_register, offset, value, temp, GetSaveFPRegsMode(), EMIT_REMEMBERED_SET, check_needed); } } void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->object()).is(edx)); ASSERT(ToRegister(instr->value()).is(eax)); __ mov(ecx, instr->name()); Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) ? isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr); } void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { if (FLAG_debug_code && check->hydrogen()->skip_check()) { Label done; __ j(NegateCondition(cc), &done, Label::kNear); __ int3(); __ bind(&done); } else { DeoptimizeIf(cc, check->environment()); } } void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return; if (instr->index()->IsConstantOperand()) { Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), instr->hydrogen()->length()->representation()); __ cmp(ToOperand(instr->length()), immediate); Condition condition = instr->hydrogen()->allow_equality() ? below : below_equal; ApplyCheckIf(condition, instr); } else { __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); Condition condition = instr->hydrogen()->allow_equality() ? above : above_equal; ApplyCheckIf(condition, instr); } } void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); if (!key->IsConstantOperand() && ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), elements_kind)) { __ SmiUntag(ToRegister(key)); } Operand operand(BuildFastArrayOperand( instr->elements(), key, instr->hydrogen()->key()->representation(), elements_kind, 0, instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = double_scratch0(); __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); __ movss(operand, xmm_scratch); } else { __ fld(0); __ fstp_s(operand); } } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); __ movsd(operand, ToDoubleRegister(instr->value())); } else { X87Mov(operand, ToX87Register(instr->value())); } } else { Register value = ToRegister(instr->value()); switch (elements_kind) { case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: __ mov_b(operand, value); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: __ mov_w(operand, value); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: __ mov(operand, value); break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_HOLEY_SMI_ELEMENTS: case FAST_HOLEY_ELEMENTS: case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); break; } } } void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { ExternalReference canonical_nan_reference = ExternalReference::address_of_canonical_non_hole_nan(); Operand double_store_operand = BuildFastArrayOperand( instr->elements(), instr->key(), instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()); if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister value = ToDoubleRegister(instr->value()); if (instr->NeedsCanonicalization()) { Label have_value; __ ucomisd(value, value); __ j(parity_odd, &have_value, Label::kNear); // NaN. __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); __ bind(&have_value); } __ movsd(double_store_operand, value); } else { // Can't use SSE2 in the serializer. if (instr->hydrogen()->IsConstantHoleStore()) { // This means we should store the (double) hole. No floating point // registers required. double nan_double = FixedDoubleArray::hole_nan_as_double(); uint64_t int_val = BitCast<uint64_t, double>(nan_double); int32_t lower = static_cast<int32_t>(int_val); int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); __ mov(double_store_operand, Immediate(lower)); Operand double_store_operand2 = BuildFastArrayOperand( instr->elements(), instr->key(), instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize, instr->additional_index()); __ mov(double_store_operand2, Immediate(upper)); } else { Label no_special_nan_handling; X87Register value = ToX87Register(instr->value()); X87Fxch(value); if (instr->NeedsCanonicalization()) { __ fld(0); __ fld(0); __ FCmp(); __ j(parity_odd, &no_special_nan_handling, Label::kNear); __ sub(esp, Immediate(kDoubleSize)); __ fst_d(MemOperand(esp, 0)); __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), Immediate(kHoleNanUpper32)); __ add(esp, Immediate(kDoubleSize)); Label canonicalize; __ j(not_equal, &canonicalize, Label::kNear); __ jmp(&no_special_nan_handling, Label::kNear); __ bind(&canonicalize); __ fstp(0); __ fld_d(Operand::StaticVariable(canonical_nan_reference)); } __ bind(&no_special_nan_handling); __ fst_d(double_store_operand); } } } void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { Register elements = ToRegister(instr->elements()); Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Operand operand = BuildFastArrayOperand( instr->elements(), instr->key(), instr->hydrogen()->key()->representation(), FAST_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag, instr->additional_index()); if (instr->value()->IsRegister()) { __ mov(operand, ToRegister(instr->value())); } else { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); if (IsSmi(operand_value)) { Immediate immediate = ToImmediate(operand_value, Representation::Smi()); __ mov(operand, immediate); } else { ASSERT(!IsInteger32(operand_value)); Handle<Object> handle_value = ToHandle(operand_value); __ mov(operand, handle_value); } } if (instr->hydrogen()->NeedsWriteBarrier()) { ASSERT(instr->value()->IsRegister()); Register value = ToRegister(instr->value()); ASSERT(!instr->key()->IsConstantOperand()); SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register.
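// lea computes the effective address without a memory access; the key
// register may be clobbered because RecordWrite only needs the slot
// address from here on.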
__ lea(key, operand); __ RecordWrite(elements, key, value, GetSaveFPRegsMode(), EMIT_REMEMBERED_SET, check_needed); } } void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { // By cases: external, fast double, fast. if (instr->is_external()) { DoStoreKeyedExternalArray(instr); } else if (instr->hydrogen()->value()->representation().IsDouble()) { DoStoreKeyedFixedDoubleArray(instr); } else { DoStoreKeyedFixedArray(instr); } } void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->object()).is(edx)); ASSERT(ToRegister(instr->key()).is(ecx)); ASSERT(ToRegister(instr->value()).is(eax)); Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() : isolate()->builtins()->KeyedStoreIC_Initialize(); CallCode(ic, RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { Register object = ToRegister(instr->object()); Register temp = ToRegister(instr->temp()); Label no_memento_found; __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); DeoptimizeIf(equal, instr->environment()); __ bind(&no_memento_found); } void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object_reg = ToRegister(instr->object()); Handle<Map> from_map = instr->original_map(); Handle<Map> to_map = instr->transitioned_map(); ElementsKind from_kind = instr->from_kind(); ElementsKind to_kind = instr->to_kind(); Label not_applicable; bool is_simple_map_transition = IsSimpleMapChangeTransition(from_kind, to_kind); Label::Distance branch_distance = is_simple_map_transition ? Label::kNear : Label::kFar; __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); __ j(not_equal, &not_applicable, branch_distance); if (is_simple_map_transition) { Register new_map_reg = ToRegister(instr->new_map_temp()); __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), Immediate(to_map)); // Write barrier. ASSERT_NE(instr->temp(), NULL); __ RecordWriteForMap(object_reg, to_map, new_map_reg, ToRegister(instr->temp()), kDontSaveFPRegs); } else { ASSERT(ToRegister(instr->context()).is(esi)); PushSafepointRegistersScope scope(this); if (!object_reg.is(eax)) { __ mov(eax, object_reg); } __ mov(ebx, to_map); TransitionElementsKindStub stub(from_kind, to_kind); __ CallStub(&stub); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); } __ bind(&not_applicable); } void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { public: DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LStringCharCodeAt* instr_; }; DeferredStringCharCodeAt* deferred = new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); StringCharLoadGenerator::Generate(masm(), factory(), ToRegister(instr->string()), ToRegister(instr->index()), ToRegister(instr->result()), deferred->entry()); __ bind(deferred->exit()); } void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { Register string = ToRegister(instr->string()); Register result = ToRegister(instr->result()); // TODO(3095996): Get rid of this.
// For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map. __ Set(result, Immediate(0)); PushSafepointRegistersScope scope(this); __ push(string); // Push the index as a smi. This is safe because of the checks in // DoStringCharCodeAt above. STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); if (instr->index()->IsConstantOperand()) { Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), Representation::Smi()); __ push(immediate); } else { Register index = ToRegister(instr->index()); __ SmiTag(index); __ push(index); } CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr, instr->context()); __ AssertSmi(eax); __ SmiUntag(eax); __ StoreToSafepointRegisterSlot(result, eax); } void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { public: DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredStringCharFromCode(instr_); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LStringCharFromCode* instr_; }; DeferredStringCharFromCode* deferred = new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); Register result = ToRegister(instr->result()); ASSERT(!char_code.is(result)); __ cmp(char_code, String::kMaxOneByteCharCode); __ j(above, deferred->entry()); __ Set(result, Immediate(factory()->single_character_string_cache())); __ mov(result, FieldOperand(result, char_code, times_pointer_size, FixedArray::kHeaderSize)); __ cmp(result, factory()->undefined_value()); __ j(equal, deferred->entry()); __ bind(deferred->exit()); } void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { Register char_code = ToRegister(instr->char_code()); Register result = ToRegister(instr->result()); // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map.
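// StoreToSafepointRegisterSlot (used at the end of this deferred path)
// writes into the stack slot where PushSafepointRegistersScope saved the
// register, so the value reappears in the register when the scope pops.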
__ Set(result, Immediate(0)); PushSafepointRegistersScope scope(this); __ SmiTag(char_code); __ push(char_code); CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); __ StoreToSafepointRegisterSlot(result, eax); } void LCodeGen::DoStringAdd(LStringAdd* instr) { ASSERT(ToRegister(instr->context()).is(esi)); if (FLAG_new_string_add) { ASSERT(ToRegister(instr->left()).is(edx)); ASSERT(ToRegister(instr->right()).is(eax)); NewStringAddStub stub(instr->hydrogen()->flags(), isolate()->heap()->GetPretenureMode()); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { EmitPushTaggedOperand(instr->left()); EmitPushTaggedOperand(instr->right()); StringAddStub stub(instr->hydrogen()->flags()); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* input = instr->value(); LOperand* output = instr->result(); ASSERT(input->IsRegister() || input->IsStackSlot()); ASSERT(output->IsDoubleRegister()); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); } else if (input->IsRegister()) { Register input_reg = ToRegister(input); __ push(input_reg); X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); __ pop(input_reg); } else { X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); } } void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { Register input = ToRegister(instr->value()); __ SmiTag(input); if (!instr->hydrogen()->value()->HasRange() || !instr->hydrogen()->value()->range()->IsInSmiRange()) { DeoptimizeIf(overflow, instr->environment()); } } void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { LOperand* input = instr->value(); LOperand* output = instr->result(); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); LOperand* temp = instr->temp(); __ LoadUint32(ToDoubleRegister(output), ToRegister(input), ToDoubleRegister(temp)); } else { X87Register res = ToX87Register(output); X87PrepareToWrite(res); __ LoadUint32NoSSE2(ToRegister(input)); X87CommitWrite(res); } } void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { Register input = ToRegister(instr->value()); if (!instr->hydrogen()->value()->HasRange() || !instr->hydrogen()->value()->range()->IsInSmiRange()) { __ test(input, Immediate(0xc0000000)); DeoptimizeIf(not_zero, instr->environment()); } __ SmiTag(input); } void LCodeGen::DoNumberTagI(LNumberTagI* instr) { class DeferredNumberTagI V8_FINAL : public LDeferredCode { public: DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LNumberTagI* instr_; }; LOperand* input = instr->value(); ASSERT(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr, x87_stack_); __ SmiTag(reg); __ j(overflow, deferred->entry()); __ bind(deferred->exit()); } void LCodeGen::DoNumberTagU(LNumberTagU* instr) { class DeferredNumberTagU V8_FINAL : public LDeferredCode { public: DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredNumberTagI(instr_, 
instr_->value(), UNSIGNED_INT32); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LNumberTagU* instr_; }; LOperand* input = instr->value(); ASSERT(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr, x87_stack_); __ cmp(reg, Immediate(Smi::kMaxValue)); __ j(above, deferred->entry()); __ SmiTag(reg); __ bind(deferred->exit()); } void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, LOperand* value, IntegerSignedness signedness) { Label slow; Register reg = ToRegister(value); Register tmp = reg.is(eax) ? ecx : eax; XMMRegister xmm_scratch = double_scratch0(); // Preserve the value of all registers. PushSafepointRegistersScope scope(this); Label done; if (signedness == SIGNED_INT32) { // There was overflow, so bits 30 and 31 of the original integer // disagree. Try to allocate a heap number in new space and store // the value in there. If that fails, call the runtime system. __ SmiUntag(reg); __ xor_(reg, 0x80000000); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope feature_scope(masm(), SSE2); __ Cvtsi2sd(xmm_scratch, Operand(reg)); } else { __ push(reg); __ fild_s(Operand(esp, 0)); __ pop(reg); } } else { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope feature_scope(masm(), SSE2); __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(LNumberTagU::cast(instr)->temp())); } else { // There's no fild variant for unsigned values, so zero-extend to a 64-bit // int manually. __ push(Immediate(0)); __ push(reg); __ fild_d(Operand(esp, 0)); __ pop(reg); __ pop(reg); } } if (FLAG_inline_new) { __ AllocateHeapNumber(reg, tmp, no_reg, &slow); __ jmp(&done, Label::kNear); } // Slow case: Call the runtime system to do the number allocation. __ bind(&slow); // TODO(3095996): Put a valid pointer value in the stack slot where the result // register is stored, as this register is in the pointer map, but contains an // integer value. __ StoreToSafepointRegisterSlot(reg, Immediate(0)); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); if (!reg.is(eax)) __ mov(reg, eax); // Done. Store the value from xmm_scratch (or the x87 top of stack) into the // value field of the allocated heap number.
__ bind(&done); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope feature_scope(masm(), SSE2); __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); } else { __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); } __ StoreToSafepointRegisterSlot(reg, reg); } void LCodeGen::DoNumberTagD(LNumberTagD* instr) { class DeferredNumberTagD V8_FINAL : public LDeferredCode { public: DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LNumberTagD* instr_; }; Register reg = ToRegister(instr->result()); bool use_sse2 = CpuFeatures::IsSupported(SSE2); if (!use_sse2) { // Put the value on top of the x87 stack. X87Register src = ToX87Register(instr->value()); X87LoadForUsage(src); } DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr, x87_stack_); if (FLAG_inline_new) { Register tmp = ToRegister(instr->temp()); __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); } else { __ jmp(deferred->entry()); } __ bind(deferred->exit()); if (use_sse2) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); } else { __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); } } void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map. Register reg = ToRegister(instr->result()); __ Set(reg, Immediate(0)); PushSafepointRegistersScope scope(this); // NumberTagI and NumberTagD use the context from the frame, rather than // the environment's HContext or HInlinedContext value. // They only call Runtime::kAllocateHeapNumber. // The corresponding HChange instructions are added in a phase that does // not have easy access to the local context. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(reg, eax); } void LCodeGen::DoSmiTag(LSmiTag* instr) { LOperand* input = instr->value(); ASSERT(input->IsRegister() && input->Equals(instr->result())); ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); __ SmiTag(ToRegister(input)); } void LCodeGen::DoSmiUntag(LSmiUntag* instr) { LOperand* input = instr->value(); Register result = ToRegister(input); ASSERT(input->IsRegister() && input->Equals(instr->result())); if (instr->needs_check()) { __ test(result, Immediate(kSmiTagMask)); DeoptimizeIf(not_zero, instr->environment()); } else { __ AssertSmi(result); } __ SmiUntag(result); } void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, Register temp_reg, X87Register res_reg, bool can_convert_undefined_to_nan, bool deoptimize_on_minus_zero, LEnvironment* env, NumberUntagDMode mode) { Label load_smi, done; X87PrepareToWrite(res_reg); if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. __ JumpIfSmi(input_reg, &load_smi, Label::kNear); // Heap number map check.
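// The map word is the first field of every heap object; comparing it
// against the canonical heap-number map is the cheapest HeapNumber test.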
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); if (!can_convert_undefined_to_nan) { DeoptimizeIf(not_equal, env); } else { Label heap_number, convert; __ j(equal, &heap_number, Label::kNear); // Convert undefined (or hole) to NaN. __ cmp(input_reg, factory()->undefined_value()); DeoptimizeIf(not_equal, env); __ bind(&convert); ExternalReference nan = ExternalReference::address_of_canonical_non_hole_nan(); __ fld_d(Operand::StaticVariable(nan)); __ jmp(&done, Label::kNear); __ bind(&heap_number); } // Heap number to x87 conversion. __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); if (deoptimize_on_minus_zero) { __ fldz(); __ FCmp(); __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); __ j(not_zero, &done, Label::kNear); // Use general purpose registers to check if we have -0.0 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); __ test(temp_reg, Immediate(HeapNumber::kSignMask)); __ j(zero, &done, Label::kNear); // Pop FPU stack before deoptimizing. __ fstp(0); DeoptimizeIf(not_zero, env); } __ jmp(&done, Label::kNear); } else { ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } __ bind(&load_smi); // Clobbering a temp is faster than re-tagging the // input register since we avoid dependencies. __ mov(temp_reg, input_reg); __ SmiUntag(temp_reg); // Untag smi before converting to float. __ push(temp_reg); __ fild_s(Operand(esp, 0)); __ add(esp, Immediate(kPointerSize)); __ bind(&done); X87CommitWrite(res_reg); } void LCodeGen::EmitNumberUntagD(Register input_reg, Register temp_reg, XMMRegister result_reg, bool can_convert_undefined_to_nan, bool deoptimize_on_minus_zero, LEnvironment* env, NumberUntagDMode mode) { Label convert, load_smi, done; if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. __ JumpIfSmi(input_reg, &load_smi, Label::kNear); // Heap number map check. __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); if (can_convert_undefined_to_nan) { __ j(not_equal, &convert, Label::kNear); } else { DeoptimizeIf(not_equal, env); } // Heap number to XMM conversion. __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); if (deoptimize_on_minus_zero) { XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(result_reg, xmm_scratch); __ j(not_zero, &done, Label::kNear); __ movmskpd(temp_reg, result_reg); __ test_b(temp_reg, 1); DeoptimizeIf(not_zero, env); } __ jmp(&done, Label::kNear); if (can_convert_undefined_to_nan) { __ bind(&convert); // Convert undefined (and hole) to NaN. __ cmp(input_reg, factory()->undefined_value()); DeoptimizeIf(not_equal, env); ExternalReference nan = ExternalReference::address_of_canonical_non_hole_nan(); __ movsd(result_reg, Operand::StaticVariable(nan)); __ jmp(&done, Label::kNear); } } else { ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } __ bind(&load_smi); // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the // input register since we avoid dependencies. __ mov(temp_reg, input_reg); __ SmiUntag(temp_reg); // Untag smi before converting to float. __ Cvtsi2sd(result_reg, Operand(temp_reg)); __ bind(&done); } void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { Register input_reg = ToRegister(instr->value()); if (instr->truncating()) { Label no_heap_number, check_bools, check_false; // Heap number map check. 
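// Truncating conversions (from expressions like x | 0) accept a wider
// input set: heap numbers are truncated modulo 2^32, undefined and false
// convert to 0, and true converts to 1; anything else deoptimizes.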
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); __ j(not_equal, &no_heap_number, Label::kNear); __ TruncateHeapNumberToI(input_reg, input_reg); __ jmp(done); __ bind(&no_heap_number); // Check for Oddballs. Undefined/False is converted to zero and True to one // for truncating conversions. __ cmp(input_reg, factory()->undefined_value()); __ j(not_equal, &check_bools, Label::kNear); __ Set(input_reg, Immediate(0)); __ jmp(done); __ bind(&check_bools); __ cmp(input_reg, factory()->true_value()); __ j(not_equal, &check_false, Label::kNear); __ Set(input_reg, Immediate(1)); __ jmp(done); __ bind(&check_false); __ cmp(input_reg, factory()->false_value()); __ RecordComment("Deferred TaggedToI: cannot truncate"); DeoptimizeIf(not_equal, instr->environment()); __ Set(input_reg, Immediate(0)); __ jmp(done); } else { Label bailout; XMMRegister scratch = (instr->temp() != NULL) ? ToDoubleRegister(instr->temp()) : no_xmm_reg; __ TaggedToI(input_reg, input_reg, scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout); __ jmp(done); __ bind(&bailout); DeoptimizeIf(no_condition, instr->environment()); } } void LCodeGen::DoTaggedToI(LTaggedToI* instr) { class DeferredTaggedToI V8_FINAL : public LDeferredCode { public: DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LTaggedToI* instr_; }; LOperand* input = instr->value(); ASSERT(input->IsRegister()); Register input_reg = ToRegister(input); ASSERT(input_reg.is(ToRegister(instr->result()))); if (instr->hydrogen()->value()->representation().IsSmi()) { __ SmiUntag(input_reg); } else { DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr, x87_stack_); __ JumpIfNotSmi(input_reg, deferred->entry()); __ SmiUntag(input_reg); __ bind(deferred->exit()); } } void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* input = instr->value(); ASSERT(input->IsRegister()); LOperand* temp = instr->temp(); ASSERT(temp->IsRegister()); LOperand* result = instr->result(); ASSERT(result->IsDoubleRegister()); Register input_reg = ToRegister(input); bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); Register temp_reg = ToRegister(temp); HValue* value = instr->hydrogen()->value(); NumberUntagDMode mode = value->representation().IsSmi() ? 
NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister result_reg = ToDoubleRegister(result); EmitNumberUntagD(input_reg, temp_reg, result_reg, instr->hydrogen()->can_convert_undefined_to_nan(), deoptimize_on_minus_zero, instr->environment(), mode); } else { EmitNumberUntagDNoSSE2(input_reg, temp_reg, ToX87Register(instr->result()), instr->hydrogen()->can_convert_undefined_to_nan(), deoptimize_on_minus_zero, instr->environment(), mode); } } void LCodeGen::DoDoubleToI(LDoubleToI* instr) { LOperand* input = instr->value(); ASSERT(input->IsDoubleRegister()); LOperand* result = instr->result(); ASSERT(result->IsRegister()); Register result_reg = ToRegister(result); if (instr->truncating()) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); __ TruncateDoubleToI(result_reg, input_reg); } else { X87Register input_reg = ToX87Register(input); X87Fxch(input_reg); __ TruncateX87TOSToI(result_reg); } } else { Label bailout, done; if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); XMMRegister xmm_scratch = double_scratch0(); __ DoubleToI(result_reg, input_reg, xmm_scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); } else { X87Register input_reg = ToX87Register(input); X87Fxch(input_reg); __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); } __ jmp(&done, Label::kNear); __ bind(&bailout); DeoptimizeIf(no_condition, instr->environment()); __ bind(&done); } } void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { LOperand* input = instr->value(); ASSERT(input->IsDoubleRegister()); LOperand* result = instr->result(); ASSERT(result->IsRegister()); Register result_reg = ToRegister(result); Label bailout, done; if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); XMMRegister xmm_scratch = double_scratch0(); __ DoubleToI(result_reg, input_reg, xmm_scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); } else { X87Register input_reg = ToX87Register(input); X87Fxch(input_reg); __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); } __ jmp(&done, Label::kNear); __ bind(&bailout); DeoptimizeIf(no_condition, instr->environment()); __ bind(&done); __ SmiTag(result_reg); DeoptimizeIf(overflow, instr->environment()); } void LCodeGen::DoCheckSmi(LCheckSmi* instr) { LOperand* input = instr->value(); __ test(ToOperand(input), Immediate(kSmiTagMask)); DeoptimizeIf(not_zero, instr->environment()); } void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { if (!instr->hydrogen()->value()->IsHeapObject()) { LOperand* input = instr->value(); __ test(ToOperand(input), Immediate(kSmiTagMask)); DeoptimizeIf(zero, instr->environment()); } } void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { Register input = ToRegister(instr->value()); Register temp = ToRegister(instr->temp()); __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); if (instr->hydrogen()->is_interval_check()) { InstanceType first; InstanceType last; instr->hydrogen()->GetCheckInterval(&first, &last); __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), static_cast<int8_t>(first)); // If there is only one type in the interval, check for equality.
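// Example: a check for a single instance type (first == last) needs only
// the equality compare above; a genuine interval uses the below/above
// pair emitted in the else branch.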
if (first == last) { DeoptimizeIf(not_equal, instr->environment()); } else { DeoptimizeIf(below, instr->environment()); // Omit check for the last type. if (last != LAST_TYPE) { __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), static_cast<int8_t>(last)); DeoptimizeIf(above, instr->environment()); } } } else { uint8_t mask; uint8_t tag; instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); if (IsPowerOf2(mask)) { ASSERT(tag == 0 || IsPowerOf2(tag)); __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask); DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment()); } else { __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); __ and_(temp, mask); __ cmp(temp, tag); DeoptimizeIf(not_equal, instr->environment()); } } } void LCodeGen::DoCheckValue(LCheckValue* instr) { Handle<HeapObject> object = instr->hydrogen()->object().handle(); if (instr->hydrogen()->object_in_new_space()) { Register reg = ToRegister(instr->value()); Handle<Cell> cell = isolate()->factory()->NewCell(object); __ cmp(reg, Operand::ForCell(cell)); } else { Operand operand = ToOperand(instr->value()); __ cmp(operand, object); } DeoptimizeIf(not_equal, instr->environment()); } void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { { PushSafepointRegistersScope scope(this); __ push(object); __ xor_(esi, esi); __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance); RecordSafepointWithRegisters( instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); __ test(eax, Immediate(kSmiTagMask)); } DeoptimizeIf(zero, instr->environment()); } void LCodeGen::DoCheckMaps(LCheckMaps* instr) { class DeferredCheckMaps V8_FINAL : public LDeferredCode { public: DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { SetExit(check_maps()); } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredInstanceMigration(instr_, object_); } Label* check_maps() { return &check_maps_; } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LCheckMaps* instr_; Label check_maps_; Register object_; }; if (instr->hydrogen()->CanOmitMapChecks()) return; LOperand* input = instr->value(); ASSERT(input->IsRegister()); Register reg = ToRegister(input); DeferredCheckMaps* deferred = NULL; if (instr->hydrogen()->has_migration_target()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); __ bind(deferred->check_maps()); } UniqueSet<Map> map_set = instr->hydrogen()->map_set(); Label success; for (int i = 0; i < map_set.size() - 1; i++) { Handle<Map> map = map_set.at(i).handle(); __ CompareMap(reg, map); __ j(equal, &success, Label::kNear); } Handle<Map> map = map_set.at(map_set.size() - 1).handle(); __ CompareMap(reg, map); if (instr->hydrogen()->has_migration_target()) { __ j(not_equal, deferred->entry()); } else { DeoptimizeIf(not_equal, instr->environment()); } __ bind(&success); } void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { CpuFeatureScope scope(masm(), SSE2); XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); XMMRegister xmm_scratch = double_scratch0(); Register result_reg = ToRegister(instr->result()); __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); } void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { ASSERT(instr->unclamped()->Equals(instr->result())); Register value_reg = ToRegister(instr->result()); __ ClampUint8(value_reg); } void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { CpuFeatureScope scope(masm(), SSE2);
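// Clamping to uint8 follows the typed-array rules: NaN and undefined map
// to 0, negatives clamp to 0, values above 255 clamp to 255, and
// fractional values round to the nearest integer, ties to even.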
ASSERT(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); XMMRegister xmm_scratch = double_scratch0(); Label is_smi, done, heap_number; __ JumpIfSmi(input_reg, &is_smi); // Check for heap number __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); __ j(equal, &heap_number, Label::kNear); // Check for undefined. Undefined is converted to zero for clamping // conversions. __ cmp(input_reg, factory()->undefined_value()); DeoptimizeIf(not_equal, instr->environment()); __ mov(input_reg, 0); __ jmp(&done, Label::kNear); // Heap number __ bind(&heap_number); __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); __ jmp(&done, Label::kNear); // smi __ bind(&is_smi); __ SmiUntag(input_reg); __ ClampUint8(input_reg); __ bind(&done); } void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); Register scratch = ToRegister(instr->scratch()); Register scratch2 = ToRegister(instr->scratch2()); Register scratch3 = ToRegister(instr->scratch3()); Label is_smi, done, heap_number, valid_exponent, largest_value, zero_result, maybe_nan_or_infinity; __ JumpIfSmi(input_reg, &is_smi); // Check for heap number __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); __ j(equal, &heap_number, Label::kNear); // Check for undefined. Undefined is converted to zero for clamping // conversions. __ cmp(input_reg, factory()->undefined_value()); DeoptimizeIf(not_equal, instr->environment()); __ jmp(&zero_result, Label::kNear); // Heap number __ bind(&heap_number); // Surprisingly, all of the hand-crafted bit-manipulations below are much // faster than the x86 FPU built-in instruction, especially since "banker's // rounding" would be additionally very expensive. // Get exponent word. __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); // Test for negative values --> clamp to zero __ test(scratch, scratch); __ j(negative, &zero_result, Label::kNear); // Get exponent alone in scratch2. __ mov(scratch2, scratch); __ and_(scratch2, HeapNumber::kExponentMask); __ shr(scratch2, HeapNumber::kExponentShift); __ j(zero, &zero_result, Label::kNear); __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); __ j(negative, &zero_result, Label::kNear); const uint32_t non_int8_exponent = 7; __ cmp(scratch2, Immediate(non_int8_exponent + 1)); // If the exponent is too big, check for special values. __ j(greater, &maybe_nan_or_infinity, Label::kNear); __ bind(&valid_exponent); // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent // < 7. The shift bias is the number of bits to shift the mantissa such that, // with an exponent of 7, the top-most one is in bit 30, allowing detection of // the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to 1). int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; __ lea(result_reg, MemOperand(scratch2, shift_bias)); // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the // top bits of the mantissa.
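// Illustrative example: for 2.5 the unbiased exponent e is 1, so the
// shift is (e + 1) + shift_bias = 4; the mantissa 1.01b lands with its
// integer part in bits 24..23 and the 0.5 in the "halves" bit 22. The
// rounding code below adds the "ones" bit into the halves place, so 2.5
// rounds to 2 while 3.5 rounds to 4 (round half to even).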
__ and_(scratch, HeapNumber::kMantissaMask); // Put back the implicit 1 of the mantissa __ or_(scratch, 1 << HeapNumber::kExponentShift); // Shift up to round __ shl_cl(scratch); // Use "banker's rounding" to spec: If fractional part of number is 0.5, then // use the bit in the "ones" place and add it to the "halves" place, which has // the effect of rounding to even. __ mov(scratch2, scratch); const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; const uint32_t one_bit_shift = one_half_bit_shift + 1; __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); Label no_round; __ j(less, &no_round, Label::kNear); Label round_up; __ mov(scratch2, Immediate(1 << one_half_bit_shift)); __ j(greater, &round_up, Label::kNear); __ test(scratch3, scratch3); __ j(not_zero, &round_up, Label::kNear); __ mov(scratch2, scratch); __ and_(scratch2, Immediate(1 << one_bit_shift)); __ shr(scratch2, 1); __ bind(&round_up); __ add(scratch, scratch2); __ j(overflow, &largest_value, Label::kNear); __ bind(&no_round); __ shr(scratch, 23); __ mov(result_reg, scratch); __ jmp(&done, Label::kNear); __ bind(&maybe_nan_or_infinity); // Check for NaN/Infinity, all other values map to 255 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); __ j(not_equal, &largest_value, Label::kNear); // Check for NaN, which differs from Infinity in that at least one mantissa // bit is set. __ and_(scratch, HeapNumber::kMantissaMask); __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN // Infinity -> Fall through to map to 255. __ bind(&largest_value); __ mov(result_reg, Immediate(255)); __ jmp(&done, Label::kNear); __ bind(&zero_result); __ xor_(result_reg, result_reg); __ jmp(&done, Label::kNear); // smi __ bind(&is_smi); if (!input_reg.is(result_reg)) { __ mov(result_reg, input_reg); } __ SmiUntag(result_reg); __ ClampUint8(result_reg); __ bind(&done); } void LCodeGen::DoAllocate(LAllocate* instr) { class DeferredAllocate V8_FINAL : public LDeferredCode { public: DeferredAllocate(LCodeGen* codegen, LAllocate* instr, const X87Stack& x87_stack) : LDeferredCode(codegen, x87_stack), instr_(instr) { } virtual void Generate() V8_OVERRIDE { codegen()->DoDeferredAllocate(instr_); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } private: LAllocate* instr_; }; DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr, x87_stack_); Register result = ToRegister(instr->result()); Register temp = ToRegister(instr->temp()); // Allocate memory for the object. 
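// Fast path: inline bump-pointer allocation via __ Allocate, with the
// deferred runtime call as fallback when the space is exhausted.
// TAG_OBJECT returns the pointer with kHeapObjectTag already applied.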
AllocationFlags flags = TAG_OBJECT; if (instr->hydrogen()->MustAllocateDoubleAligned()) { flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); } if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); } if (instr->size()->IsConstantOperand()) { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); if (size <= Page::kMaxRegularHeapObjectSize) { __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); } else { __ jmp(deferred->entry()); } } else { Register size = ToRegister(instr->size()); __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); } __ bind(deferred->exit()); if (instr->hydrogen()->MustPrefillWithFiller()) { if (instr->size()->IsConstantOperand()) { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); __ mov(temp, (size / kPointerSize) - 1); } else { temp = ToRegister(instr->size()); __ shr(temp, kPointerSizeLog2); __ dec(temp); } Label loop; __ bind(&loop); __ mov(FieldOperand(result, temp, times_pointer_size, 0), isolate()->factory()->one_pointer_filler_map()); __ dec(temp); __ j(not_zero, &loop); } } void LCodeGen::DoDeferredAllocate(LAllocate* instr) { Register result = ToRegister(instr->result()); // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map. __ Set(result, Immediate(Smi::FromInt(0))); PushSafepointRegistersScope scope(this); if (instr->size()->IsRegister()) { Register size = ToRegister(instr->size()); ASSERT(!size.is(result)); __ SmiTag(ToRegister(instr->size())); __ push(size); } else { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); __ push(Immediate(Smi::FromInt(size))); } int flags = AllocateDoubleAlignFlag::encode( instr->hydrogen()->MustAllocateDoubleAligned()); if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); } else { flags = AllocateTargetSpace::update(flags, NEW_SPACE); } __ push(Immediate(Smi::FromInt(flags))); CallRuntimeFromDeferred( Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); __ StoreToSafepointRegisterSlot(result, eax); } void LCodeGen::DoToFastProperties(LToFastProperties* instr) { ASSERT(ToRegister(instr->value()).is(eax)); __ push(eax); CallRuntime(Runtime::kToFastProperties, 1, instr); } void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); Label materialized; // Registers will be used as follows: // ecx = literals array. // ebx = regexp literal. // eax = regexp literal clone. // esi = context.
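// A regexp literal is materialized at most once per literal site and
// cached in the literals array; subsequent executions only take the
// clone path below.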
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create the regexp literal using a runtime function.
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated, Label::kNear);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll the copy loop once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(instr->hydrogen()->shared_info()));
    __ push(Immediate(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance =
      left_block == next_block ? Label::kNear : Label::kFar;
  Label::Distance false_distance =
      right_block == next_block ? Label::kNear : Label::kFar;
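  // Each recognized typeof string below is lowered to a final compare whose
  // condition is returned to the caller; checks that can be decided earlier
  // (e.g. a smi input for "number") jump straight to the true/false labels.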
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label, false_distance);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof &&
             type_name->Equals(heap()->null_string())) {
    __ cmp(input, factory()->null_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label, true_distance);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    if (!FLAG_harmony_typeof) {
      __ cmp(input, factory()->null_value());
      __ j(equal, true_label, true_distance);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label, false_distance);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label, false_distance);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
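  // (A construct frame stores Smi::FromInt(StackFrame::CONSTRUCT) in its
  // marker slot, so a single compare against that marker is enough to
  // identify frames created by a 'new' call.)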
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
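    // Unlike the function-entry case above, the slow path here lives in
    // deferred code, so the back-edge fast path only pays for a compare
    // against the stack limit and a conditional jump.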
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr->environment());

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr->environment());

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
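  // Runtime::kGetPropertyNamesFast returns either a map (meaning the enum
  // cache can be used) or a fixed array of property names; the compare
  // against meta_map below deoptimizes when the result is not a map.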
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result, FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32