Diffstat (limited to 'deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc')
-rw-r--r-- | deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc | 6275 |
1 file changed, 6275 insertions, 0 deletions
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc new file mode 100644 index 0000000000..074628b5ef --- /dev/null +++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc @@ -0,0 +1,6275 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_X87 + +#include "src/crankshaft/x87/lithium-codegen-x87.h" + +#include "src/base/bits.h" +#include "src/code-factory.h" +#include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/crankshaft/hydrogen-osr.h" +#include "src/deoptimizer.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" +#include "src/profiler/cpu-profiler.h" +#include "src/x87/frames-x87.h" + +namespace v8 { +namespace internal { + + +// When invoking builtins, we need to record the safepoint in the middle of +// the invoke instruction sequence generated by the macro assembler. +class SafepointGenerator final : public CallWrapper { + public: + SafepointGenerator(LCodeGen* codegen, + LPointerMap* pointers, + Safepoint::DeoptMode mode) + : codegen_(codegen), + pointers_(pointers), + deopt_mode_(mode) {} + virtual ~SafepointGenerator() {} + + void BeforeCall(int call_size) const override {} + + void AfterCall() const override { + codegen_->RecordSafepoint(pointers_, deopt_mode_); + } + + private: + LCodeGen* codegen_; + LPointerMap* pointers_; + Safepoint::DeoptMode deopt_mode_; +}; + + +#define __ masm()-> + +bool LCodeGen::GenerateCode() { + LPhase phase("Z_Code generation", chunk()); + DCHECK(is_unused()); + status_ = GENERATING; + + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + + support_aligned_spilled_doubles_ = info()->IsOptimizing(); + + dynamic_frame_alignment_ = info()->IsOptimizing() && + ((chunk()->num_double_slots() > 2 && + !chunk()->graph()->is_recursive()) || + !info()->osr_ast_id().IsNone()); + + return GeneratePrologue() && + GenerateBody() && + GenerateDeferredCode() && + GenerateJumpTable() && + GenerateSafepointTable(); +} + + +void LCodeGen::FinishCode(Handle<Code> code) { + DCHECK(is_done()); + code->set_stack_slots(GetStackSlotCount()); + code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + PopulateDeoptimizationData(code); + if (info()->ShouldEnsureSpaceForLazyDeopt()) { + Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); + } +} + + +#ifdef _MSC_VER +void LCodeGen::MakeSureStackPagesMapped(int offset) { + const int kPageSize = 4 * KB; + for (offset -= kPageSize; offset > 0; offset -= kPageSize) { + __ mov(Operand(esp, offset), eax); + } +} +#endif + + +bool LCodeGen::GeneratePrologue() { + DCHECK(is_generating()); + + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); + +#ifdef DEBUG + if (strlen(FLAG_stop_at) > 0 && + info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ int3(); + } +#endif + + if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) { + // Move state of dynamic frame alignment into edx. + __ Move(edx, Immediate(kNoAlignmentPadding)); + + Label do_not_pad, align_loop; + STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); + // Align esp + 4 to a multiple of 2 * kPointerSize. 
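// ---------------------------------------------------------------------------
// Aside: a minimal host-side sketch (not from the patch) of the idea behind
// MakeSureStackPagesMapped() above. Windows commits stack pages lazily, one
// guard page at a time, so a frame larger than a page must be touched at
// page intervals before esp is moved past it. The emitted
// `mov [esp + offset], eax` stores do exactly that; in plain C++ terms:
static void TouchStackPages(volatile char* new_sp, int frame_size) {
  const int kPageSize = 4 * 1024;  // 4 KB x86 page, as in the code above
  // Touch one byte in every page of the new frame, top of frame downward,
  // mirroring the store loop above.
  for (int off = frame_size - kPageSize; off > 0; off -= kPageSize) {
    new_sp[off] = 0;
  }
}
// ---------------------------------------------------------------------------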
+ __ test(esp, Immediate(kPointerSize)); + __ j(not_zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + __ mov(edx, Immediate(kAlignmentPaddingPushed)); + // Copy arguments, receiver, and return address. + __ mov(ecx, Immediate(scope()->num_parameters() + 2)); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); + __ bind(&do_not_pad); + } + } + + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + DCHECK(!frame_is_built_); + frame_is_built_ = true; + if (info()->IsStub()) { + __ StubPrologue(); + } else { + __ Prologue(info()->IsCodePreAgingActive()); + } + } + + if (info()->IsOptimizing() && + dynamic_frame_alignment_ && + FLAG_debug_code) { + __ test(esp, Immediate(kPointerSize)); + __ Assert(zero, kFrameIsExpectedToBeAligned); + } + + // Reserve space for the stack slots needed by the code. + int slots = GetStackSlotCount(); + DCHECK(slots != 0 || !info()->IsOptimizing()); + if (slots > 0) { + if (slots == 1) { + if (dynamic_frame_alignment_) { + __ push(edx); + } else { + __ push(Immediate(kNoAlignmentPadding)); + } + } else { + if (FLAG_debug_code) { + __ sub(Operand(esp), Immediate(slots * kPointerSize)); +#ifdef _MSC_VER + MakeSureStackPagesMapped(slots * kPointerSize); +#endif + __ push(eax); + __ mov(Operand(eax), Immediate(slots)); + Label loop; + __ bind(&loop); + __ mov(MemOperand(esp, eax, times_4, 0), + Immediate(kSlotsZapValue)); + __ dec(eax); + __ j(not_zero, &loop); + __ pop(eax); + } else { + __ sub(Operand(esp), Immediate(slots * kPointerSize)); +#ifdef _MSC_VER + MakeSureStackPagesMapped(slots * kPointerSize); +#endif + } + + if (support_aligned_spilled_doubles_) { + Comment(";;; Store dynamic frame alignment tag for spilled doubles"); + // Store dynamic frame alignment state in the first local. + int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; + if (dynamic_frame_alignment_) { + __ mov(Operand(ebp, offset), edx); + } else { + __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); + } + } + } + } + + // Initailize FPU state. + __ fninit(); + + return !is_aborted(); +} + + +void LCodeGen::DoPrologue(LPrologue* instr) { + Comment(";;; Prologue begin"); + + // Possibly allocate a local context. + if (info_->num_heap_slots() > 0) { + Comment(";;; Allocate local context"); + bool need_write_barrier = true; + // Argument to NewContext is the function, which is still in edi. + int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt; + if (info()->scope()->is_script_scope()) { + __ push(edi); + __ Push(info()->scope()->GetScopeInfo(info()->isolate())); + __ CallRuntime(Runtime::kNewScriptContext, 2); + deopt_mode = Safepoint::kLazyDeopt; + } else if (slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(isolate(), slots); + __ CallStub(&stub); + // Result of FastNewContextStub is always in new space. + need_write_barrier = false; + } else { + __ push(edi); + __ CallRuntime(Runtime::kNewFunctionContext, 1); + } + RecordSafepoint(deopt_mode); + + // Context is returned in eax. It replaces the context passed to us. + // It's saved in the stack and kept live in esi. 
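// ---------------------------------------------------------------------------
// Aside: the FLAG_debug_code loop above fills every fresh spill slot with
// kSlotsZapValue so reads of uninitialized slots stand out in a debugger.
// A sketch of the same fill in plain C++ (the zap constant here is a
// stand-in, not V8's actual value):
static void ZapSpillSlots(unsigned* slots, int count) {
  const unsigned kZap = 0xdeadbeef;  // placeholder zap pattern
  for (int i = count; i > 0; --i) slots[i - 1] = kZap;  // top slot downward
}
// ---------------------------------------------------------------------------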
+ __ mov(esi, eax); + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax); + + // Copy parameters into context if necessary. + int num_parameters = scope()->num_parameters(); + int first_parameter = scope()->has_this_declaration() ? -1 : 0; + for (int i = first_parameter; i < num_parameters; i++) { + Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i); + if (var->IsContextSlot()) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ mov(eax, Operand(ebp, parameter_offset)); + // Store it in the context. + int context_offset = Context::SlotOffset(var->index()); + __ mov(Operand(esi, context_offset), eax); + // Update the write barrier. This clobbers eax and ebx. + if (need_write_barrier) { + __ RecordWriteContextSlot(esi, context_offset, eax, ebx, + kDontSaveFPRegs); + } else if (FLAG_debug_code) { + Label done; + __ JumpIfInNewSpace(esi, eax, &done, Label::kNear); + __ Abort(kExpectedNewSpaceObject); + __ bind(&done); + } + } + } + Comment(";;; End allocate local context"); + } + + Comment(";;; Prologue end"); +} + + +void LCodeGen::GenerateOsrPrologue() { + // Generate the OSR entry prologue at the first unknown OSR value, or if there + // are none, at the OSR entrypoint instruction. + if (osr_pc_offset_ >= 0) return; + + osr_pc_offset_ = masm()->pc_offset(); + + // Move state of dynamic frame alignment into edx. + __ Move(edx, Immediate(kNoAlignmentPadding)); + + if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) { + Label do_not_pad, align_loop; + // Align ebp + 4 to a multiple of 2 * kPointerSize. + __ test(ebp, Immediate(kPointerSize)); + __ j(zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + __ mov(edx, Immediate(kAlignmentPaddingPushed)); + + // Move all parts of the frame over one word. The frame consists of: + // unoptimized frame slots, alignment state, context, frame pointer, return + // address, receiver, and the arguments. + __ mov(ecx, Immediate(scope()->num_parameters() + + 5 + graph()->osr()->UnoptimizedFrameSlots())); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); + __ sub(Operand(ebp), Immediate(kPointerSize)); + __ bind(&do_not_pad); + } + + // Save the first local, which is overwritten by the alignment state. + Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize); + __ push(alignment_loc); + + // Set the dynamic frame alignment state. + __ mov(alignment_loc, edx); + + // Adjust the frame size, subsuming the unoptimized frame into the + // optimized frame. + int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); + DCHECK(slots >= 1); + __ sub(esp, Immediate((slots - 1) * kPointerSize)); + + // Initailize FPU state. + __ fninit(); +} + + +void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { + if (instr->IsCall()) { + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + } + if (!instr->IsLazyBailout() && !instr->IsGap()) { + safepoints_.BumpLastLazySafepointIndex(); + } + FlushX87StackIfNecessary(instr); +} + + +void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { + // When return from function call, FPU should be initialized again. 
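// ---------------------------------------------------------------------------
// Aside: a small sketch (not from the patch) of the caller-SP-relative
// addressing used when copying parameters into the context above. With
// 4-byte slots, parameter i of n sits (n - 1 - i) slots above the caller SP
// offset, so later parameters lie closer to the caller's stack pointer:
static int ParameterOffset(int i, int num_parameters, int caller_sp_offset) {
  const int kPointerSize = 4;  // ia32
  return caller_sp_offset + (num_parameters - 1 - i) * kPointerSize;
}
// ---------------------------------------------------------------------------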
+ if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) { + bool double_result = instr->HasDoubleRegisterResult(); + if (double_result) { + __ lea(esp, Operand(esp, -kDoubleSize)); + __ fstp_d(Operand(esp, 0)); + } + __ fninit(); + if (double_result) { + __ fld_d(Operand(esp, 0)); + __ lea(esp, Operand(esp, kDoubleSize)); + } + } + if (instr->IsGoto()) { + x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this); + } else if (FLAG_debug_code && FLAG_enable_slow_asserts && + !instr->IsGap() && !instr->IsReturn()) { + if (instr->ClobbersDoubleRegisters(isolate())) { + if (instr->HasDoubleRegisterResult()) { + DCHECK_EQ(1, x87_stack_.depth()); + } else { + DCHECK_EQ(0, x87_stack_.depth()); + } + } + __ VerifyX87StackDepth(x87_stack_.depth()); + } +} + + +bool LCodeGen::GenerateJumpTable() { + if (!jump_table_.length()) return !is_aborted(); + + Label needs_frame; + Comment(";;; -------------------- Jump table --------------------"); + + for (int i = 0; i < jump_table_.length(); i++) { + Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i]; + __ bind(&table_entry->label); + Address entry = table_entry->address; + DeoptComment(table_entry->deopt_info); + if (table_entry->needs_frame) { + DCHECK(!info()->saves_caller_doubles()); + __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); + __ call(&needs_frame); + } else { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } + info()->LogDeoptCallPosition(masm()->pc_offset(), + table_entry->deopt_info.inlining_id); + } + if (needs_frame.is_linked()) { + __ bind(&needs_frame); + + /* stack layout + 4: entry address + 3: return address <-- esp + 2: garbage + 1: garbage + 0: garbage + */ + __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker. + __ push(MemOperand(esp, kPointerSize)); // Copy return address. + __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address. + + /* stack layout + 4: entry address + 3: return address + 2: garbage + 1: return address + 0: entry address <-- esp + */ + __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp. + + // Copy context. + __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset)); + __ mov(MemOperand(esp, 3 * kPointerSize), ebp); + // Fill ebp with the right stack frame address. + __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); + + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + DCHECK(info()->IsStub()); + __ mov(MemOperand(esp, 2 * kPointerSize), + Immediate(Smi::FromInt(StackFrame::STUB))); + + /* stack layout + 4: old ebp + 3: context pointer + 2: stub marker + 1: return address + 0: entry address <-- esp + */ + __ ret(0); // Call the continuation without clobbering registers. 
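// ---------------------------------------------------------------------------
// Aside: the slot rewriting in the jump-table epilogue above is easier to
// follow as a host-side simulation (a sketch, not the patch's code). Index 0
// is the slot esp ends up pointing at; on entry the layout is
// {garbage x3, return address, entry address}:
static void BuildStubFrameForDeopt(unsigned slots[5], unsigned saved_ebp,
                                   unsigned context, unsigned stub_marker) {
  unsigned entry = slots[4];     // pushed deopt entry address
  unsigned ret_addr = slots[3];  // return address of the deopting code
  slots[4] = saved_ebp;          // old ebp, so the frame can be torn down
  slots[3] = context;            // context slot of the new stub frame
  slots[2] = stub_marker;        // Smi-encoded StackFrame::STUB marker
  slots[1] = ret_addr;           // return address for the stub frame
  slots[0] = entry;              // `ret` pops this into eip: a frame-safe jmp
}
// ---------------------------------------------------------------------------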
+ } + return !is_aborted(); +} + + +bool LCodeGen::GenerateDeferredCode() { + DCHECK(is_generating()); + if (deferred_.length() > 0) { + for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { + LDeferredCode* code = deferred_[i]; + X87Stack copy(code->x87_stack()); + x87_stack_ = copy; + + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition( + chunk()->graph()->SourcePositionToScriptPosition(value->position())); + + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); + __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Build frame"); + DCHECK(!frame_is_built_); + DCHECK(info()->IsStub()); + frame_is_built_ = true; + // Build the frame in such a way that esi isn't trashed. + __ push(ebp); // Caller's frame pointer. + __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + __ lea(ebp, Operand(esp, 2 * kPointerSize)); + Comment(";;; Deferred code"); + } + code->Generate(); + if (NeedsDeferredFrame()) { + __ bind(code->done()); + Comment(";;; Destroy frame"); + DCHECK(frame_is_built_); + frame_is_built_ = false; + __ mov(esp, ebp); + __ pop(ebp); + } + __ jmp(code->exit()); + } + } + + // Deferred code is the last part of the instruction sequence. Mark + // the generated code as done unless we bailed out. + if (!is_aborted()) status_ = DONE; + return !is_aborted(); +} + + +bool LCodeGen::GenerateSafepointTable() { + DCHECK(is_done()); + if (info()->ShouldEnsureSpaceForLazyDeopt()) { + // For lazy deoptimization we need space to patch a call after every call. + // Ensure there is always space for such patching, even if the code ends + // in a call. 
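// ---------------------------------------------------------------------------
// Aside: the loop just below pads the instruction stream with single-byte
// nops until pc_offset() reaches the target, guaranteeing patch_size() bytes
// of patchable space after the final call. The same idea against a plain
// byte buffer (a sketch, not the patch's code):
static void PadForLazyDeoptPatch(unsigned char* buf, int* pc_offset,
                                 int patch_size) {
  int target = *pc_offset + patch_size;
  while (*pc_offset < target) buf[(*pc_offset)++] = 0x90;  // x86 nop
}
// ---------------------------------------------------------------------------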
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); + while (masm()->pc_offset() < target_offset) { + masm()->nop(); + } + } + safepoints_.Emit(masm(), GetStackSlotCount()); + return !is_aborted(); +} + + +Register LCodeGen::ToRegister(int code) const { + return Register::from_code(code); +} + + +X87Register LCodeGen::ToX87Register(int code) const { + return X87Register::from_code(code); +} + + +void LCodeGen::X87LoadForUsage(X87Register reg) { + DCHECK(x87_stack_.Contains(reg)); + x87_stack_.Fxch(reg); + x87_stack_.pop(); +} + + +void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { + DCHECK(x87_stack_.Contains(reg1)); + DCHECK(x87_stack_.Contains(reg2)); + if (reg1.is(reg2) && x87_stack_.depth() == 1) { + __ fld(x87_stack_.st(reg1)); + x87_stack_.push(reg1); + x87_stack_.pop(); + x87_stack_.pop(); + } else { + x87_stack_.Fxch(reg1, 1); + x87_stack_.Fxch(reg2); + x87_stack_.pop(); + x87_stack_.pop(); + } +} + + +int LCodeGen::X87Stack::GetLayout() { + int layout = stack_depth_; + for (int i = 0; i < stack_depth_; i++) { + layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3)); + } + + return layout; +} + + +void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { + DCHECK(is_mutable_); + DCHECK(Contains(reg) && stack_depth_ > other_slot); + int i = ArrayIndex(reg); + int st = st2idx(i); + if (st != other_slot) { + int other_i = st2idx(other_slot); + X87Register other = stack_[other_i]; + stack_[other_i] = reg; + stack_[i] = other; + if (st == 0) { + __ fxch(other_slot); + } else if (other_slot == 0) { + __ fxch(st); + } else { + __ fxch(st); + __ fxch(other_slot); + __ fxch(st); + } + } +} + + +int LCodeGen::X87Stack::st2idx(int pos) { + return stack_depth_ - pos - 1; +} + + +int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { + for (int i = 0; i < stack_depth_; i++) { + if (stack_[i].is(reg)) return i; + } + UNREACHABLE(); + return -1; +} + + +bool LCodeGen::X87Stack::Contains(X87Register reg) { + for (int i = 0; i < stack_depth_; i++) { + if (stack_[i].is(reg)) return true; + } + return false; +} + + +void LCodeGen::X87Stack::Free(X87Register reg) { + DCHECK(is_mutable_); + DCHECK(Contains(reg)); + int i = ArrayIndex(reg); + int st = st2idx(i); + if (st > 0) { + // keep track of how fstp(i) changes the order of elements + int tos_i = st2idx(0); + stack_[i] = stack_[tos_i]; + } + pop(); + __ fstp(st); +} + + +void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { + if (x87_stack_.Contains(dst)) { + x87_stack_.Fxch(dst); + __ fstp(0); + } else { + x87_stack_.push(dst); + } + X87Fld(src, opts); +} + + +void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) { + if (x87_stack_.Contains(dst)) { + x87_stack_.Fxch(dst); + __ fstp(0); + x87_stack_.pop(); + // Push ST(i) onto the FPU register stack + __ fld(x87_stack_.st(src)); + x87_stack_.push(dst); + } else { + // Push ST(i) onto the FPU register stack + __ fld(x87_stack_.st(src)); + x87_stack_.push(dst); + } +} + + +void LCodeGen::X87Fld(Operand src, X87OperandType opts) { + DCHECK(!src.is_reg_only()); + switch (opts) { + case kX87DoubleOperand: + __ fld_d(src); + break; + case kX87FloatOperand: + __ fld_s(src); + break; + case kX87IntOperand: + __ fild_s(src); + break; + default: + UNREACHABLE(); + } +} + + +void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { + DCHECK(!dst.is_reg_only()); + x87_stack_.Fxch(src); + switch (opts) { + case kX87DoubleOperand: + __ fst_d(dst); + break; + case kX87FloatOperand: + __ fst_s(dst); + 
break; + case kX87IntOperand: + __ fist_s(dst); + break; + default: + UNREACHABLE(); + } +} + + +void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { + DCHECK(is_mutable_); + if (Contains(reg)) { + Free(reg); + } + // Mark this register as the next register to write to + stack_[stack_depth_] = reg; +} + + +void LCodeGen::X87Stack::CommitWrite(X87Register reg) { + DCHECK(is_mutable_); + // Assert the reg is prepared to write, but not on the virtual stack yet + DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) && + stack_depth_ < X87Register::kMaxNumAllocatableRegisters); + stack_depth_++; +} + + +void LCodeGen::X87PrepareBinaryOp( + X87Register left, X87Register right, X87Register result) { + // You need to use DefineSameAsFirst for x87 instructions + DCHECK(result.is(left)); + x87_stack_.Fxch(right, 1); + x87_stack_.Fxch(left); +} + + +void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { + if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { + bool double_inputs = instr->HasDoubleRegisterInput(); + + // Flush stack from tos down, since FreeX87() will mess with tos + for (int i = stack_depth_-1; i >= 0; i--) { + X87Register reg = stack_[i]; + // Skip registers which contain the inputs for the next instruction + // when flushing the stack + if (double_inputs && instr->IsDoubleInput(reg, cgen)) { + continue; + } + Free(reg); + if (i < stack_depth_-1) i++; + } + } + if (instr->IsReturn()) { + while (stack_depth_ > 0) { + __ fstp(0); + stack_depth_--; + } + if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); + } +} + + +void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr, + LCodeGen* cgen) { + // For going to a joined block, an explicit LClobberDoubles is inserted before + // LGoto. Because all used x87 registers are spilled to stack slots. The + // ResolvePhis phase of register allocator could guarantee the two input's x87 + // stacks have the same layout. So don't check stack_depth_ <= 1 here. + int goto_block_id = goto_instr->block_id(); + if (current_block_id + 1 != goto_block_id) { + // If we have a value on the x87 stack on leaving a block, it must be a + // phi input. If the next block we compile is not the join block, we have + // to discard the stack state. + // Before discarding the stack state, we need to save it if the "goto block" + // has unreachable last predecessor when FLAG_unreachable_code_elimination. + if (FLAG_unreachable_code_elimination) { + int length = goto_instr->block()->predecessors()->length(); + bool has_unreachable_last_predecessor = false; + for (int i = 0; i < length; i++) { + HBasicBlock* block = goto_instr->block()->predecessors()->at(i); + if (block->IsUnreachable() && + (block->block_id() + 1) == goto_block_id) { + has_unreachable_last_predecessor = true; + } + } + if (has_unreachable_last_predecessor) { + if (cgen->x87_stack_map_.find(goto_block_id) == + cgen->x87_stack_map_.end()) { + X87Stack* stack = new (cgen->zone()) X87Stack(*this); + cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack)); + } + } + } + + // Discard the stack state. + stack_depth_ = 0; + } +} + + +void LCodeGen::EmitFlushX87ForDeopt() { + // The deoptimizer does not support X87 Registers. But as long as we + // deopt from a stub its not a problem, since we will re-materialize the + // original stub inputs, which can't be double registers. 
+ // DCHECK(info()->IsStub()); + if (FLAG_debug_code && FLAG_enable_slow_asserts) { + __ pushfd(); + __ VerifyX87StackDepth(x87_stack_.depth()); + __ popfd(); + } + + // Flush X87 stack in the deoptimizer entry. +} + + +Register LCodeGen::ToRegister(LOperand* op) const { + DCHECK(op->IsRegister()); + return ToRegister(op->index()); +} + + +X87Register LCodeGen::ToX87Register(LOperand* op) const { + DCHECK(op->IsDoubleRegister()); + return ToX87Register(op->index()); +} + + +int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { + return ToRepresentation(op, Representation::Integer32()); +} + + +int32_t LCodeGen::ToRepresentation(LConstantOperand* op, + const Representation& r) const { + HConstant* constant = chunk_->LookupConstant(op); + if (r.IsExternal()) { + return reinterpret_cast<int32_t>( + constant->ExternalReferenceValue().address()); + } + int32_t value = constant->Integer32Value(); + if (r.IsInteger32()) return value; + DCHECK(r.IsSmiOrTagged()); + return reinterpret_cast<int32_t>(Smi::FromInt(value)); +} + + +Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); + return constant->handle(isolate()); +} + + +double LCodeGen::ToDouble(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(constant->HasDoubleValue()); + return constant->DoubleValue(); +} + + +ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + DCHECK(constant->HasExternalReferenceValue()); + return constant->ExternalReferenceValue(); +} + + +bool LCodeGen::IsInteger32(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); +} + + +bool LCodeGen::IsSmi(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmi(); +} + + +static int ArgumentsOffsetWithoutFrame(int index) { + DCHECK(index < 0); + return -(index + 1) * kPointerSize + kPCOnStackSize; +} + + +Operand LCodeGen::ToOperand(LOperand* op) const { + if (op->IsRegister()) return Operand(ToRegister(op)); + DCHECK(!op->IsDoubleRegister()); + DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); + if (NeedsEagerFrame()) { + return Operand(ebp, StackSlotOffset(op->index())); + } else { + // Retrieve parameter without eager stack-frame relative to the + // stack-pointer. + return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); + } +} + + +Operand LCodeGen::HighOperand(LOperand* op) { + DCHECK(op->IsDoubleStackSlot()); + if (NeedsEagerFrame()) { + return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize); + } else { + // Retrieve parameter without eager stack-frame relative to the + // stack-pointer. + return Operand( + esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); + } +} + + +void LCodeGen::WriteTranslation(LEnvironment* environment, + Translation* translation) { + if (environment == NULL) return; + + // The translation includes one command per value in the environment. 
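// ---------------------------------------------------------------------------
// Aside: ToRepresentation() above can hand back a Smi-or-tagged constant as
// a raw 32-bit word because on ia32 a Smi is just the integer shifted left
// one bit with a zero tag bit. A sketch of that encoding:
static int SmiFromInt(int value) {
  const int kSmiTagSize = 1, kSmiTag = 0;
  return (value << kSmiTagSize) | kSmiTag;  // assumes value fits in 31 bits
}
static int SmiToInt(int smi) { return smi >> 1; }  // arithmetic shift back
// ---------------------------------------------------------------------------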
+ int translation_size = environment->translation_size(); + + WriteTranslation(environment->outer(), translation); + WriteTranslationFrame(environment, translation); + + int object_index = 0; + int dematerialized_index = 0; + for (int i = 0; i < translation_size; ++i) { + LOperand* value = environment->values()->at(i); + AddToTranslation(environment, + translation, + value, + environment->HasTaggedValueAt(i), + environment->HasUint32ValueAt(i), + &object_index, + &dematerialized_index); + } +} + + +void LCodeGen::AddToTranslation(LEnvironment* environment, + Translation* translation, + LOperand* op, + bool is_tagged, + bool is_uint32, + int* object_index_pointer, + int* dematerialized_index_pointer) { + if (op == LEnvironment::materialization_marker()) { + int object_index = (*object_index_pointer)++; + if (environment->ObjectIsDuplicateAt(object_index)) { + int dupe_of = environment->ObjectDuplicateOfAt(object_index); + translation->DuplicateObject(dupe_of); + return; + } + int object_length = environment->ObjectLengthAt(object_index); + if (environment->ObjectIsArgumentsAt(object_index)) { + translation->BeginArgumentsObject(object_length); + } else { + translation->BeginCapturedObject(object_length); + } + int dematerialized_index = *dematerialized_index_pointer; + int env_offset = environment->translation_size() + dematerialized_index; + *dematerialized_index_pointer += object_length; + for (int i = 0; i < object_length; ++i) { + LOperand* value = environment->values()->at(env_offset + i); + AddToTranslation(environment, + translation, + value, + environment->HasTaggedValueAt(env_offset + i), + environment->HasUint32ValueAt(env_offset + i), + object_index_pointer, + dematerialized_index_pointer); + } + return; + } + + if (op->IsStackSlot()) { + int index = op->index(); + if (index >= 0) { + index += StandardFrameConstants::kFixedFrameSize / kPointerSize; + } + if (is_tagged) { + translation->StoreStackSlot(index); + } else if (is_uint32) { + translation->StoreUint32StackSlot(index); + } else { + translation->StoreInt32StackSlot(index); + } + } else if (op->IsDoubleStackSlot()) { + int index = op->index(); + if (index >= 0) { + index += StandardFrameConstants::kFixedFrameSize / kPointerSize; + } + translation->StoreDoubleStackSlot(index); + } else if (op->IsRegister()) { + Register reg = ToRegister(op); + if (is_tagged) { + translation->StoreRegister(reg); + } else if (is_uint32) { + translation->StoreUint32Register(reg); + } else { + translation->StoreInt32Register(reg); + } + } else if (op->IsDoubleRegister()) { + X87Register reg = ToX87Register(op); + translation->StoreDoubleRegister(reg); + } else if (op->IsConstantOperand()) { + HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); + int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); + translation->StoreLiteral(src_index); + } else { + UNREACHABLE(); + } +} + + +void LCodeGen::CallCodeGeneric(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr, + SafepointMode safepoint_mode) { + DCHECK(instr != NULL); + __ call(code, mode); + RecordSafepointWithLazyDeopt(instr, safepoint_mode); + + // Signal that we don't inline smi code before these stubs in the + // optimizing code generator. 
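// ---------------------------------------------------------------------------
// Aside: WriteTranslation() above recurses on environment->outer() before
// emitting its own frame, so translations list frames outermost-first. The
// shape of that traversal, sketched with illustrative names:
struct Environment {
  Environment* outer;  // enclosing (inlining) frame, NULL at the top
};
template <typename Emit>
static void WalkOuterFirst(Environment* env, Emit emit) {
  if (env == NULL) return;
  WalkOuterFirst(env->outer, emit);  // outer frames first...
  emit(env);                         // ...then this frame's values
}
// ---------------------------------------------------------------------------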
+ if (code->kind() == Code::BINARY_OP_IC || + code->kind() == Code::COMPARE_IC) { + __ nop(); + } +} + + +void LCodeGen::CallCode(Handle<Code> code, + RelocInfo::Mode mode, + LInstruction* instr) { + CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc, + LInstruction* instr, SaveFPRegsMode save_doubles) { + DCHECK(instr != NULL); + DCHECK(instr->HasPointerMap()); + + __ CallRuntime(fun, argc, save_doubles); + + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + + DCHECK(info()->is_calling()); +} + + +void LCodeGen::LoadContextFromDeferred(LOperand* context) { + if (context->IsRegister()) { + if (!ToRegister(context).is(esi)) { + __ mov(esi, ToRegister(context)); + } + } else if (context->IsStackSlot()) { + __ mov(esi, ToOperand(context)); + } else if (context->IsConstantOperand()) { + HConstant* constant = + chunk_->LookupConstant(LConstantOperand::cast(context)); + __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); + } else { + UNREACHABLE(); + } +} + +void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, + int argc, + LInstruction* instr, + LOperand* context) { + LoadContextFromDeferred(context); + + __ CallRuntimeSaveDoubles(id); + RecordSafepointWithRegisters( + instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); + + DCHECK(info()->is_calling()); +} + + +void LCodeGen::RegisterEnvironmentForDeoptimization( + LEnvironment* environment, Safepoint::DeoptMode mode) { + environment->set_has_been_used(); + if (!environment->HasBeenRegistered()) { + // Physical stack frame layout: + // -x ............. -4 0 ..................................... y + // [incoming arguments] [spill slots] [pushed outgoing arguments] + + // Layout of the environment: + // 0 ..................................................... size-1 + // [parameters] [locals] [expression stack including arguments] + + // Layout of the translation: + // 0 ........................................................ size - 1 + 4 + // [expression stack including arguments] [locals] [4 words] [parameters] + // |>------------ translation_size ------------<| + + int frame_count = 0; + int jsframe_count = 0; + for (LEnvironment* e = environment; e != NULL; e = e->outer()) { + ++frame_count; + if (e->frame_type() == JS_FUNCTION) { + ++jsframe_count; + } + } + Translation translation(&translations_, frame_count, jsframe_count, zone()); + WriteTranslation(environment, &translation); + int deoptimization_index = deoptimizations_.length(); + int pc_offset = masm()->pc_offset(); + environment->Register(deoptimization_index, + translation.index(), + (mode == Safepoint::kLazyDeopt) ? 
pc_offset : -1); + deoptimizations_.Add(environment, zone()); + } +} + + +void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, + Deoptimizer::DeoptReason deopt_reason, + Deoptimizer::BailoutType bailout_type) { + LEnvironment* environment = instr->environment(); + RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); + DCHECK(environment->HasBeenRegistered()); + int id = environment->deoptimization_index(); + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); + if (entry == NULL) { + Abort(kBailoutWasNotPrepared); + return; + } + + if (DeoptEveryNTimes()) { + ExternalReference count = ExternalReference::stress_deopt_count(isolate()); + Label no_deopt; + __ pushfd(); + __ push(eax); + __ mov(eax, Operand::StaticVariable(count)); + __ sub(eax, Immediate(1)); + __ j(not_zero, &no_deopt, Label::kNear); + if (FLAG_trap_on_deopt) __ int3(); + __ mov(eax, Immediate(FLAG_deopt_every_n_times)); + __ mov(Operand::StaticVariable(count), eax); + __ pop(eax); + __ popfd(); + DCHECK(frame_is_built_); + // Put the x87 stack layout in TOS. + if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); + __ push(Immediate(x87_stack_.GetLayout())); + __ fild_s(MemOperand(esp, 0)); + // Don't touch eflags. + __ lea(esp, Operand(esp, kPointerSize)); + __ call(entry, RelocInfo::RUNTIME_ENTRY); + __ bind(&no_deopt); + __ mov(Operand::StaticVariable(count), eax); + __ pop(eax); + __ popfd(); + } + + // Put the x87 stack layout in TOS, so that we can save x87 fp registers in + // the correct location. + { + Label done; + if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); + if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt(); + + int x87_stack_layout = x87_stack_.GetLayout(); + __ push(Immediate(x87_stack_layout)); + __ fild_s(MemOperand(esp, 0)); + // Don't touch eflags. + __ lea(esp, Operand(esp, kPointerSize)); + __ bind(&done); + } + + if (info()->ShouldTrapOnDeopt()) { + Label done; + if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); + __ int3(); + __ bind(&done); + } + + Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason); + + DCHECK(info()->IsStub() || frame_is_built_); + if (cc == no_condition && frame_is_built_) { + DeoptComment(deopt_info); + __ call(entry, RelocInfo::RUNTIME_ENTRY); + info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id); + } else { + Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, + !frame_is_built_); + // We often have several deopts to the same entry, reuse the last + // jump entry if this is the case. + if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() || + jump_table_.is_empty() || + !table_entry.IsEquivalentTo(jump_table_.last())) { + jump_table_.Add(table_entry, zone()); + } + if (cc == no_condition) { + __ jmp(&jump_table_.last().label); + } else { + __ j(cc, &jump_table_.last().label); + } + } +} + + +void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, + Deoptimizer::DeoptReason deopt_reason) { + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? 
Deoptimizer::LAZY + : Deoptimizer::EAGER; + DeoptimizeIf(cc, instr, deopt_reason, bailout_type); +} + + +void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { + int length = deoptimizations_.length(); + if (length == 0) return; + Handle<DeoptimizationInputData> data = + DeoptimizationInputData::New(isolate(), length, TENURED); + + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); + data->SetTranslationByteArray(*translations); + data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); + data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); + if (info_->IsOptimizing()) { + // Reference to shared function info does not change between phases. + AllowDeferredHandleDereference allow_handle_dereference; + data->SetSharedFunctionInfo(*info_->shared_info()); + } else { + data->SetSharedFunctionInfo(Smi::FromInt(0)); + } + data->SetWeakCellCache(Smi::FromInt(0)); + + Handle<FixedArray> literals = + factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); + { AllowDeferredHandleDereference copy_handles; + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); + } + + data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); + data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); + + // Populate the deoptimization entries. + for (int i = 0; i < length; i++) { + LEnvironment* env = deoptimizations_[i]; + data->SetAstId(i, env->ast_id()); + data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); + data->SetArgumentsStackHeight(i, + Smi::FromInt(env->arguments_stack_height())); + data->SetPc(i, Smi::FromInt(env->pc_offset())); + } + code->set_deoptimization_data(*data); +} + + +void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { + DCHECK_EQ(0, deoptimization_literals_.length()); + for (auto function : chunk()->inlined_functions()) { + DefineDeoptimizationLiteral(function); + } + inlined_function_count_ = deoptimization_literals_.length(); +} + + +void LCodeGen::RecordSafepointWithLazyDeopt( + LInstruction* instr, SafepointMode safepoint_mode) { + if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { + RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); + } else { + DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kLazyDeopt); + } +} + + +void LCodeGen::RecordSafepoint( + LPointerMap* pointers, + Safepoint::Kind kind, + int arguments, + Safepoint::DeoptMode deopt_mode) { + DCHECK(kind == expected_safepoint_kind_); + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); + Safepoint safepoint = + safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); + for (int i = 0; i < operands->length(); i++) { + LOperand* pointer = operands->at(i); + if (pointer->IsStackSlot()) { + safepoint.DefinePointerSlot(pointer->index(), zone()); + } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { + safepoint.DefinePointerRegister(ToRegister(pointer), zone()); + } + } +} + + +void LCodeGen::RecordSafepoint(LPointerMap* pointers, + Safepoint::DeoptMode mode) { + RecordSafepoint(pointers, Safepoint::kSimple, 0, mode); +} + + +void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) { + LPointerMap empty_pointers(zone()); + RecordSafepoint(&empty_pointers, mode); +} + + +void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, + int arguments, + 
                                            Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  if (label->block()->predecessors()->length() > 1) {
+    // A join block's x87 stack is that of its last visited predecessor.
+    // If the last visited predecessor block is unreachable, the stack state
+    // will be wrong. In such case, use the x87 stack of reachable predecessor.
+    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
+    // Restore x87 stack.
+    if (it != x87_stack_map_.end()) {
+      x87_stack_ = *(it->second);
+    }
+  }
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ test(dividend, dividend);
+    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+    // Note that this is correct even for kMinInt operands.
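// ---------------------------------------------------------------------------
// Aside: the neg/and/neg sequence emitted next computes the remainder for
// negative dividends. Sketched in C++ (ILP32 assumed); it is kMinInt-safe
// because the mask is applied to the negated value, whose low bits are
// correct even when the negation itself wraps:
static int ModByPowerOf2(int dividend, int mask) {
  // mask is |divisor| - 1, i.e. -(divisor + 1) for negative divisors.
  if (dividend < 0) {
    unsigned n = 0u - static_cast<unsigned>(dividend);  // wraps for kMinInt
    return -static_cast<int>(n & static_cast<unsigned>(mask));
  }
  return dividend & mask;
}
// ---------------------------------------------------------------------------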
+    __ neg(dividend);
+    __ and_(dividend, mask);
+    __ neg(dividend);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ and_(dividend, mask);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  __ imul(edx, edx, Abs(divisor));
+  __ mov(eax, dividend);
+  __ sub(eax, edx);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ j(not_zero, &remainder_not_zero, Label::kNear);
+    __ cmp(dividend, Immediate(0));
+    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+
+  Register left_reg = ToRegister(instr->left());
+  DCHECK(left_reg.is(eax));
+  Register right_reg = ToRegister(instr->right());
+  DCHECK(!right_reg.is(eax));
+  DCHECK(!right_reg.is(edx));
+  Register result_reg = ToRegister(instr->result());
+  DCHECK(result_reg.is(edx));
+
+  Label done;
+  // Check for x % 0, idiv would signal a divide error. We have to
+  // deopt in this case because we can't return a NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, Operand(right_reg));
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for kMinInt % -1, idiv would signal a divide error. We
+  // have to deopt if we care about -0, because we can't return that.
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ cmp(left_reg, kMinInt);
+    __ j(not_equal, &no_overflow_possible, Label::kNear);
+    __ cmp(right_reg, -1);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+    } else {
+      __ j(not_equal, &no_overflow_possible, Label::kNear);
+      __ Move(result_reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // Sign extend dividend in eax into edx:eax.
+  __ cdq();
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive_left;
+    __ test(left_reg, Operand(left_reg));
+    __ j(not_sign, &positive_left, Label::kNear);
+    __ idiv(right_reg);
+    __ test(result_reg, Operand(result_reg));
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    __ jmp(&done, Label::kNear);
+    __ bind(&positive_left);
+  }
+  __ idiv(right_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
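// ---------------------------------------------------------------------------
// Aside: the sar/shr/add/sar sequence a few lines below implements signed
// truncating division by 2^shift: negative dividends get a bias of
// 2^shift - 1 added before the arithmetic shift, so the result rounds toward
// zero instead of toward -infinity. Sketched (ILP32, shift in [1, 31],
// arithmetic >> on negative ints assumed, as ia32 compilers provide):
static int DivByPowerOf2(int dividend, int shift) {
  unsigned sign_mask = static_cast<unsigned>(dividend >> 31);  // 0 or ~0u
  int bias = static_cast<int>(sign_mask >> (32 - shift));      // 2^shift - 1
  return (dividend + bias) >> shift;                           // sar
}
// ---------------------------------------------------------------------------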
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ cmp(dividend, kMinInt);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ test(dividend, Immediate(mask));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+  __ Move(result, dividend);
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (shift > 0) {
+    // The arithmetic shift is always OK, the 'if' is an optimization only.
+    if (shift > 1) __ sar(result, 31);
+    __ shr(result, 32 - shift);
+    __ add(result, dividend);
+    __ sar(result, shift);
+  }
+  if (divisor < 0) __ neg(result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ mov(eax, edx);
+    __ imul(eax, eax, divisor);
+    __ sub(eax, dividend);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ test(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ test(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmp(dividend, kMinInt);
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmp(divisor, -1);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to edx (= remainder).
+  __ cdq();
+  __ idiv(divisor);
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ test(remainder, remainder);
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
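// ---------------------------------------------------------------------------
// Aside: a plain arithmetic shift floors, it does not truncate, which is
// why it suffices here (flooring division) but needed the bias described
// above for DoDivByPowerOf2I (truncating division). For example:
//   -5 / 4   == -1   (C++ '/': rounds toward zero)
//   -5 >> 2  == -2   == floor(-5 / 4.0)   (rounds toward -infinity)
// ---------------------------------------------------------------------------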
+  if (divisor == 1) return;
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ sar(dividend, shift);
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  __ neg(dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ sar(dividend, shift);
+    return;
+  }
+
+  Label not_kmin_int, done;
+  __ j(no_overflow, &not_kmin_int, Label::kNear);
+  __ mov(dividend, Immediate(kMinInt / divisor));
+  __ jmp(&done, Label::kNear);
+  __ bind(&not_kmin_int);
+  __ sar(dividend, shift);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(dividend, Abs(divisor));
+    if (divisor < 0) __ neg(edx);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
+  Register temp = ToRegister(instr->temp3());
+  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+  Label needs_adjustment, done;
+  __ cmp(dividend, Immediate(0));
+  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+  __ jmp(&done, Label::kNear);
+  __ bind(&needs_adjustment);
+  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(temp, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+  __ dec(edx);
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(result.is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
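// ---------------------------------------------------------------------------
// Aside: both the constant case above (the dec(edx) adjustment) and the
// general case below (the xor/sar/add on the remainder) derive floor(a/b)
// from truncating division: when the remainder is nonzero and its sign
// differs from the divisor's, the truncated quotient is one too large.
// Sketched (assumes b != 0 and no kMinInt/-1 overflow; the emitted code
// deopts on those):
static int FlooringDiv(int a, int b) {
  int q = a / b, r = a % b;          // truncating quotient and remainder
  if (r != 0 && ((r ^ b) < 0)) --q;  // signs differ => round down by one
  return q;
}
// ---------------------------------------------------------------------------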
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ test(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ test(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmp(dividend, kMinInt);
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmp(divisor, -1);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to edx (= remainder).
+  __ cdq();
+  __ idiv(divisor);
+
+  Label done;
+  __ test(remainder, remainder);
+  __ j(zero, &done, Label::kNear);
+  __ xor_(remainder, divisor);
+  __ sar(remainder, 31);
+  __ add(result, remainder);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ mov(ToRegister(instr->temp()), left);
+  }
+
+  if (right->IsConstantOperand()) {
+    // Try strength reductions on the multiplication.
+    // All replacement instructions are at most as long as the imul
+    // and have better latency.
+    int constant = ToInteger32(LConstantOperand::cast(right));
+    if (constant == -1) {
+      __ neg(left);
+    } else if (constant == 0) {
+      __ xor_(left, Operand(left));
+    } else if (constant == 2) {
+      __ add(left, Operand(left));
+    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      // If we know that the multiplication can't overflow, it's safe to
+      // use instructions that don't set the overflow flag for the
+      // multiplication.
+      switch (constant) {
+        case 1:
+          // Do nothing.
+          break;
+        case 3:
+          __ lea(left, Operand(left, left, times_2, 0));
+          break;
+        case 4:
+          __ shl(left, 2);
+          break;
+        case 5:
+          __ lea(left, Operand(left, left, times_4, 0));
+          break;
+        case 8:
+          __ shl(left, 3);
+          break;
+        case 9:
+          __ lea(left, Operand(left, left, times_8, 0));
+          break;
+        case 16:
+          __ shl(left, 4);
+          break;
+        default:
+          __ imul(left, left, constant);
+          break;
+      }
+    } else {
+      __ imul(left, left, constant);
+    }
+  } else {
+    if (instr->hydrogen()->representation().IsSmi()) {
+      __ SmiUntag(left);
+    }
+    __ imul(left, ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    Label done;
+    __ test(left, Operand(left));
+    __ j(not_zero, &done);
+    if (right->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+        __ cmp(ToRegister(instr->temp()), Immediate(0));
+        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
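// ---------------------------------------------------------------------------
// Aside: the constant-multiply strength reductions above lean on x86
// addressing modes: lea computes base + index*scale in one flag-preserving
// instruction, which matters because the kCanOverflow path needs imul's
// overflow flag while these replacements promise no overflow. Scalar
// equivalents of the emitted forms (a sketch):
static int MulByConstant(int x, int c) {
  switch (c) {
    case 3:  return x + x * 2;  // lea x, [x + x*2]
    case 5:  return x + x * 4;  // lea x, [x + x*4]
    case 9:  return x + x * 8;  // lea x, [x + x*8]
    case 4:  return x << 2;     // shl x, 2
    case 8:  return x << 3;     // shl x, 3
    case 16: return x << 4;     // shl x, 4
    default: return x * c;      // imul
  }
}
// ---------------------------------------------------------------------------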
+ __ or_(ToRegister(instr->temp()), ToOperand(right)); + DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); + } + __ bind(&done); + } +} + + +void LCodeGen::DoBitI(LBitI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); + + if (right->IsConstantOperand()) { + int32_t right_operand = + ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->representation()); + switch (instr->op()) { + case Token::BIT_AND: + __ and_(ToRegister(left), right_operand); + break; + case Token::BIT_OR: + __ or_(ToRegister(left), right_operand); + break; + case Token::BIT_XOR: + if (right_operand == int32_t(~0)) { + __ not_(ToRegister(left)); + } else { + __ xor_(ToRegister(left), right_operand); + } + break; + default: + UNREACHABLE(); + break; + } + } else { + switch (instr->op()) { + case Token::BIT_AND: + __ and_(ToRegister(left), ToOperand(right)); + break; + case Token::BIT_OR: + __ or_(ToRegister(left), ToOperand(right)); + break; + case Token::BIT_XOR: + __ xor_(ToRegister(left), ToOperand(right)); + break; + default: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoShiftI(LShiftI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + DCHECK(left->IsRegister()); + if (right->IsRegister()) { + DCHECK(ToRegister(right).is(ecx)); + + switch (instr->op()) { + case Token::ROR: + __ ror_cl(ToRegister(left)); + break; + case Token::SAR: + __ sar_cl(ToRegister(left)); + break; + case Token::SHR: + __ shr_cl(ToRegister(left)); + if (instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue); + } + break; + case Token::SHL: + __ shl_cl(ToRegister(left)); + break; + default: + UNREACHABLE(); + break; + } + } else { + int value = ToInteger32(LConstantOperand::cast(right)); + uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); + switch (instr->op()) { + case Token::ROR: + if (shift_count == 0 && instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue); + } else { + __ ror(ToRegister(left), shift_count); + } + break; + case Token::SAR: + if (shift_count != 0) { + __ sar(ToRegister(left), shift_count); + } + break; + case Token::SHR: + if (shift_count != 0) { + __ shr(ToRegister(left), shift_count); + } else if (instr->can_deopt()) { + __ test(ToRegister(left), ToRegister(left)); + DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue); + } + break; + case Token::SHL: + if (shift_count != 0) { + if (instr->hydrogen_value()->representation().IsSmi() && + instr->can_deopt()) { + if (shift_count != 1) { + __ shl(ToRegister(left), shift_count - 1); + } + __ SmiTag(ToRegister(left)); + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); + } else { + __ shl(ToRegister(left), shift_count); + } + } + break; + default: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoSubI(LSubI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + + if (right->IsConstantOperand()) { + __ sub(ToOperand(left), + ToImmediate(right, instr->hydrogen()->representation())); + } else { + __ sub(ToRegister(left), ToOperand(right)); + } + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); + } +} + + +void LCodeGen::DoConstantI(LConstantI* instr) { + __ 
Move(ToRegister(instr->result()), Immediate(instr->value())); +} + + +void LCodeGen::DoConstantS(LConstantS* instr) { + __ Move(ToRegister(instr->result()), Immediate(instr->value())); +} + + +void LCodeGen::DoConstantD(LConstantD* instr) { + uint64_t const bits = instr->bits(); + uint32_t const lower = static_cast<uint32_t>(bits); + uint32_t const upper = static_cast<uint32_t>(bits >> 32); + DCHECK(instr->result()->IsDoubleRegister()); + + __ push(Immediate(upper)); + __ push(Immediate(lower)); + X87Register reg = ToX87Register(instr->result()); + X87Mov(reg, Operand(esp, 0)); + __ add(Operand(esp), Immediate(kDoubleSize)); +} + + +void LCodeGen::DoConstantE(LConstantE* instr) { + __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); +} + + +void LCodeGen::DoConstantT(LConstantT* instr) { + Register reg = ToRegister(instr->result()); + Handle<Object> object = instr->value(isolate()); + AllowDeferredHandleDereference smi_check; + __ LoadObject(reg, object); +} + + +void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { + Register result = ToRegister(instr->result()); + Register map = ToRegister(instr->value()); + __ EnumLength(result, map); +} + + +void LCodeGen::DoDateField(LDateField* instr) { + Register object = ToRegister(instr->date()); + Register result = ToRegister(instr->result()); + Register scratch = ToRegister(instr->temp()); + Smi* index = instr->index(); + DCHECK(object.is(result)); + DCHECK(object.is(eax)); + + if (index->value() == 0) { + __ mov(result, FieldOperand(object, JSDate::kValueOffset)); + } else { + Label runtime, done; + if (index->value() < JSDate::kFirstUncachedField) { + ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); + __ mov(scratch, Operand::StaticVariable(stamp)); + __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); + __ j(not_equal, &runtime, Label::kNear); + __ mov(result, FieldOperand(object, JSDate::kValueOffset + + kPointerSize * index->value())); + __ jmp(&done, Label::kNear); + } + __ bind(&runtime); + __ PrepareCallCFunction(2, scratch); + __ mov(Operand(esp, 0), object); + __ mov(Operand(esp, 1 * kPointerSize), Immediate(index)); + __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); + __ bind(&done); + } +} + + +Operand LCodeGen::BuildSeqStringOperand(Register string, + LOperand* index, + String::Encoding encoding) { + if (index->IsConstantOperand()) { + int offset = ToRepresentation(LConstantOperand::cast(index), + Representation::Integer32()); + if (encoding == String::TWO_BYTE_ENCODING) { + offset *= kUC16Size; + } + STATIC_ASSERT(kCharSize == 1); + return FieldOperand(string, SeqString::kHeaderSize + offset); + } + return FieldOperand( + string, ToRegister(index), + encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, + SeqString::kHeaderSize); +} + + +void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { + String::Encoding encoding = instr->hydrogen()->encoding(); + Register result = ToRegister(instr->result()); + Register string = ToRegister(instr->string()); + + if (FLAG_debug_code) { + __ push(string); + __ mov(string, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset)); + + __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING + ? 
one_byte_seq_type : two_byte_seq_type)); + __ Check(equal, kUnexpectedStringType); + __ pop(string); + } + + Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); + if (encoding == String::ONE_BYTE_ENCODING) { + __ movzx_b(result, operand); + } else { + __ movzx_w(result, operand); + } +} + + +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + String::Encoding encoding = instr->hydrogen()->encoding(); + Register string = ToRegister(instr->string()); + + if (FLAG_debug_code) { + Register value = ToRegister(instr->value()); + Register index = ToRegister(instr->index()); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + int encoding_mask = + instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type; + __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); + } + + Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); + if (instr->value()->IsConstantOperand()) { + int value = ToRepresentation(LConstantOperand::cast(instr->value()), + Representation::Integer32()); + DCHECK_LE(0, value); + if (encoding == String::ONE_BYTE_ENCODING) { + DCHECK_LE(value, String::kMaxOneByteCharCode); + __ mov_b(operand, static_cast<int8_t>(value)); + } else { + DCHECK_LE(value, String::kMaxUtf16CodeUnit); + __ mov_w(operand, static_cast<int16_t>(value)); + } + } else { + Register value = ToRegister(instr->value()); + if (encoding == String::ONE_BYTE_ENCODING) { + __ mov_b(operand, value); + } else { + __ mov_w(operand, value); + } + } +} + + +void LCodeGen::DoAddI(LAddI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + + if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { + if (right->IsConstantOperand()) { + int32_t offset = ToRepresentation(LConstantOperand::cast(right), + instr->hydrogen()->representation()); + __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); + } else { + Operand address(ToRegister(left), ToRegister(right), times_1, 0); + __ lea(ToRegister(instr->result()), address); + } + } else { + if (right->IsConstantOperand()) { + __ add(ToOperand(left), + ToImmediate(right, instr->hydrogen()->representation())); + } else { + __ add(ToRegister(left), ToOperand(right)); + } + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); + } + } +} + + +void LCodeGen::DoMathMinMax(LMathMinMax* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + DCHECK(left->Equals(instr->result())); + HMathMinMax::Operation operation = instr->hydrogen()->operation(); + if (instr->hydrogen()->representation().IsSmiOrInteger32()) { + Label return_left; + Condition condition = (operation == HMathMinMax::kMathMin) + ? 
less_equal + : greater_equal; + if (right->IsConstantOperand()) { + Operand left_op = ToOperand(left); + Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()), + instr->hydrogen()->representation()); + __ cmp(left_op, immediate); + __ j(condition, &return_left, Label::kNear); + __ mov(left_op, immediate); + } else { + Register left_reg = ToRegister(left); + Operand right_op = ToOperand(right); + __ cmp(left_reg, right_op); + __ j(condition, &return_left, Label::kNear); + __ mov(left_reg, right_op); + } + __ bind(&return_left); + } else { + DCHECK(instr->hydrogen()->representation().IsDouble()); + Label check_nan_left, check_zero, return_left, return_right; + Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; + X87Register left_reg = ToX87Register(left); + X87Register right_reg = ToX87Register(right); + + X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result())); + __ fld(1); + __ fld(1); + __ FCmp(); + __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. + __ j(equal, &check_zero, Label::kNear); // left == right. + __ j(condition, &return_left, Label::kNear); + __ jmp(&return_right, Label::kNear); + + __ bind(&check_zero); + __ fld(0); + __ fldz(); + __ FCmp(); + __ j(not_equal, &return_left, Label::kNear); // left == right != 0. + // At this point, both left and right are either 0 or -0. + if (operation == HMathMinMax::kMathMin) { + // Push st0 and st1 to stack, then pop them to temp registers and OR them, + // load it to left. + Register scratch_reg = ToRegister(instr->temp()); + __ fld(1); + __ fld(1); + __ sub(esp, Immediate(2 * kPointerSize)); + __ fstp_s(MemOperand(esp, 0)); + __ fstp_s(MemOperand(esp, kPointerSize)); + __ pop(scratch_reg); + __ xor_(MemOperand(esp, 0), scratch_reg); + X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand); + __ pop(scratch_reg); // restore esp + } else { + // Since we operate on +0 and/or -0, addsd and andsd have the same effect. + X87Fxch(left_reg); + __ fadd(1); + } + __ jmp(&return_left, Label::kNear); + + __ bind(&check_nan_left); + __ fld(0); + __ fld(0); + __ FCmp(); // NaN check. + __ j(parity_even, &return_left, Label::kNear); // left == NaN. + + __ bind(&return_right); + X87Fxch(left_reg); + X87Mov(left_reg, right_reg); + + __ bind(&return_left); + } +} + + +void LCodeGen::DoArithmeticD(LArithmeticD* instr) { + X87Register left = ToX87Register(instr->left()); + X87Register right = ToX87Register(instr->right()); + X87Register result = ToX87Register(instr->result()); + if (instr->op() != Token::MOD) { + X87PrepareBinaryOp(left, right, result); + } + // Set the precision control to double-precision. + __ X87SetFPUCW(0x027F); + switch (instr->op()) { + case Token::ADD: + __ fadd_i(1); + break; + case Token::SUB: + __ fsub_i(1); + break; + case Token::MUL: + __ fmul_i(1); + break; + case Token::DIV: + __ fdiv_i(1); + break; + case Token::MOD: { + // Pass two doubles as arguments on the stack. + __ PrepareCallCFunction(4, eax); + X87Mov(Operand(esp, 1 * kDoubleSize), right); + X87Mov(Operand(esp, 0), left); + X87Free(right); + DCHECK(left.is(result)); + X87PrepareToWrite(result); + __ CallCFunction( + ExternalReference::mod_two_doubles_operation(isolate()), + 4); + + // Return value is in st(0) on ia32. + X87CommitWrite(result); + break; + } + default: + UNREACHABLE(); + break; + } + + // Restore the default value of control word. 
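+  // These are the standard x87 control-word encodings: 0x027F sets the
+  // precision-control field (bits 8-9) to 10B, i.e. 53-bit double
+  // precision, so results match IEEE doubles; 0x037F below restores the
+  // default 64-bit extended precision (PC = 11B). The low byte 0x7F keeps
+  // all floating-point exceptions masked.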
+ __ X87SetFPUCW(0x037F); +} + + +void LCodeGen::DoArithmeticT(LArithmeticT* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + DCHECK(ToRegister(instr->result()).is(eax)); + + Handle<Code> code = + CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code(); + CallCode(code, RelocInfo::CODE_TARGET, instr); +} + + +template<class InstrType> +void LCodeGen::EmitBranch(InstrType instr, Condition cc) { + int left_block = instr->TrueDestination(chunk_); + int right_block = instr->FalseDestination(chunk_); + + int next_block = GetNextEmittedBlock(); + + if (right_block == left_block || cc == no_condition) { + EmitGoto(left_block); + } else if (left_block == next_block) { + __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); + } else if (right_block == next_block) { + __ j(cc, chunk_->GetAssemblyLabel(left_block)); + } else { + __ j(cc, chunk_->GetAssemblyLabel(left_block)); + __ jmp(chunk_->GetAssemblyLabel(right_block)); + } +} + + +template <class InstrType> +void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) { + int true_block = instr->TrueDestination(chunk_); + if (cc == no_condition) { + __ jmp(chunk_->GetAssemblyLabel(true_block)); + } else { + __ j(cc, chunk_->GetAssemblyLabel(true_block)); + } +} + + +template<class InstrType> +void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { + int false_block = instr->FalseDestination(chunk_); + if (cc == no_condition) { + __ jmp(chunk_->GetAssemblyLabel(false_block)); + } else { + __ j(cc, chunk_->GetAssemblyLabel(false_block)); + } +} + + +void LCodeGen::DoBranch(LBranch* instr) { + Representation r = instr->hydrogen()->value()->representation(); + if (r.IsSmiOrInteger32()) { + Register reg = ToRegister(instr->value()); + __ test(reg, Operand(reg)); + EmitBranch(instr, not_zero); + } else if (r.IsDouble()) { + X87Register reg = ToX87Register(instr->value()); + X87LoadForUsage(reg); + __ fldz(); + __ FCmp(); + EmitBranch(instr, not_zero); + } else { + DCHECK(r.IsTagged()); + Register reg = ToRegister(instr->value()); + HType type = instr->hydrogen()->value()->type(); + if (type.IsBoolean()) { + DCHECK(!info()->IsStub()); + __ cmp(reg, factory()->true_value()); + EmitBranch(instr, equal); + } else if (type.IsSmi()) { + DCHECK(!info()->IsStub()); + __ test(reg, Operand(reg)); + EmitBranch(instr, not_equal); + } else if (type.IsJSArray()) { + DCHECK(!info()->IsStub()); + EmitBranch(instr, no_condition); + } else if (type.IsHeapNumber()) { + UNREACHABLE(); + } else if (type.IsString()) { + DCHECK(!info()->IsStub()); + __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); + EmitBranch(instr, not_equal); + } else { + ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); + + if (expected.Contains(ToBooleanStub::UNDEFINED)) { + // undefined -> false. + __ cmp(reg, factory()->undefined_value()); + __ j(equal, instr->FalseLabel(chunk_)); + } + if (expected.Contains(ToBooleanStub::BOOLEAN)) { + // true -> true. + __ cmp(reg, factory()->true_value()); + __ j(equal, instr->TrueLabel(chunk_)); + // false -> false. + __ cmp(reg, factory()->false_value()); + __ j(equal, instr->FalseLabel(chunk_)); + } + if (expected.Contains(ToBooleanStub::NULL_TYPE)) { + // 'null' -> false. 
+        __ cmp(reg, factory()->null_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ test(reg, Operand(reg));
+        __ j(equal, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ test(reg, Immediate(kSmiTagMask));
+        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+      }
+
+      Register map = no_reg;  // Keep the compiler happy.
+      if (expected.NeedsMap()) {
+        map = ToRegister(instr->temp());
+        DCHECK(!map.is(reg));
+        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+                    1 << Map::kIsUndetectable);
+          __ j(not_zero, instr->FalseLabel(chunk_));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+        __ j(above_equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+        __ j(not_zero, instr->TrueLabel(chunk_));
+        __ jmp(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CmpInstanceType(map, SYMBOL_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+               factory()->heap_number_map());
+        __ j(not_equal, &not_heap_number, Label::kNear);
+        __ fldz();
+        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+        __ FCmp();
+        __ j(zero, instr->FalseLabel(chunk_));
+        __ jmp(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ?
above_equal : greater_equal; + break; + case Token::IN: + case Token::INSTANCEOF: + default: + UNREACHABLE(); + } + return cond; +} + + +void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + bool is_unsigned = + instr->is_double() || + instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || + instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); + Condition cc = TokenToCondition(instr->op(), is_unsigned); + + if (left->IsConstantOperand() && right->IsConstantOperand()) { + // We can statically evaluate the comparison. + double left_val = ToDouble(LConstantOperand::cast(left)); + double right_val = ToDouble(LConstantOperand::cast(right)); + int next_block = EvalComparison(instr->op(), left_val, right_val) ? + instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); + EmitGoto(next_block); + } else { + if (instr->is_double()) { + X87LoadForUsage(ToX87Register(right), ToX87Register(left)); + __ FCmp(); + // Don't base result on EFLAGS when a NaN is involved. Instead + // jump to the false block. + __ j(parity_even, instr->FalseLabel(chunk_)); + } else { + if (right->IsConstantOperand()) { + __ cmp(ToOperand(left), + ToImmediate(right, instr->hydrogen()->representation())); + } else if (left->IsConstantOperand()) { + __ cmp(ToOperand(right), + ToImmediate(left, instr->hydrogen()->representation())); + // We commuted the operands, so commute the condition. + cc = CommuteCondition(cc); + } else { + __ cmp(ToRegister(left), ToOperand(right)); + } + } + EmitBranch(instr, cc); + } +} + + +void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { + Register left = ToRegister(instr->left()); + + if (instr->right()->IsConstantOperand()) { + Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right())); + __ CmpObject(left, right); + } else { + Operand right = ToOperand(instr->right()); + __ cmp(left, right); + } + EmitBranch(instr, equal); +} + + +void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { + if (instr->hydrogen()->representation().IsTagged()) { + Register input_reg = ToRegister(instr->object()); + __ cmp(input_reg, factory()->the_hole_value()); + EmitBranch(instr, equal); + return; + } + + // Put the value to the top of stack + X87Register src = ToX87Register(instr->object()); + X87LoadForUsage(src); + __ fld(0); + __ fld(0); + __ FCmp(); + Label ok; + __ j(parity_even, &ok, Label::kNear); + __ fstp(0); + EmitFalseBranch(instr, no_condition); + __ bind(&ok); + + + __ sub(esp, Immediate(kDoubleSize)); + __ fstp_d(MemOperand(esp, 0)); + + __ add(esp, Immediate(kDoubleSize)); + int offset = sizeof(kHoleNanUpper32); + // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff), + // so we check the upper with 0xffffffff for hole as a temporary fix. 
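+  // Concretely: the hole is the signaling NaN 0xFFF7FFFF'FFF7FFFF, and
+  // quieting sets bit 51, turning the upper word 0xFFF7FFFF into
+  // 0xFFFFFFFF. Since esp was already moved back up above, that upper
+  // word now sits at esp - offset (offset == sizeof(kHoleNanUpper32),
+  // i.e. 4), just below the stack pointer.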
+ __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff)); + EmitBranch(instr, equal); +} + + +void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { + Representation rep = instr->hydrogen()->value()->representation(); + DCHECK(!rep.IsInteger32()); + + if (rep.IsDouble()) { + X87Register input = ToX87Register(instr->value()); + X87LoadForUsage(input); + __ FXamMinusZero(); + EmitBranch(instr, equal); + } else { + Register value = ToRegister(instr->value()); + Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); + __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); + __ cmp(FieldOperand(value, HeapNumber::kExponentOffset), + Immediate(0x1)); + EmitFalseBranch(instr, no_overflow); + __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset), + Immediate(0x00000000)); + EmitBranch(instr, equal); + } +} + + +Condition LCodeGen::EmitIsString(Register input, + Register temp1, + Label* is_not_string, + SmiCheck check_needed = INLINE_SMI_CHECK) { + if (check_needed == INLINE_SMI_CHECK) { + __ JumpIfSmi(input, is_not_string); + } + + Condition cond = masm_->IsObjectStringType(input, temp1, temp1); + + return cond; +} + + +void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { + Register reg = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + + Condition true_cond = EmitIsString( + reg, temp, instr->FalseLabel(chunk_), check_needed); + + EmitBranch(instr, true_cond); +} + + +void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { + Operand input = ToOperand(instr->value()); + + __ test(input, Immediate(kSmiTagMask)); + EmitBranch(instr, zero); +} + + +void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(input, instr->FalseLabel(chunk_)); + } + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ test_b(FieldOperand(temp, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + EmitBranch(instr, not_zero); +} + + +static Condition ComputeCompareCondition(Token::Value op) { + switch (op) { + case Token::EQ_STRICT: + case Token::EQ: + return equal; + case Token::LT: + return less; + case Token::GT: + return greater; + case Token::LTE: + return less_equal; + case Token::GTE: + return greater_equal; + default: + UNREACHABLE(); + return no_condition; + } +} + + +void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + + Handle<Code> code = CodeFactory::StringCompare(isolate()).code(); + CallCode(code, RelocInfo::CODE_TARGET, instr); + __ test(eax, eax); + + EmitBranch(instr, ComputeCompareCondition(instr->op())); +} + + +static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == FIRST_TYPE) return to; + DCHECK(from == to || to == LAST_TYPE); + return from; +} + + +static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { + InstanceType from = instr->from(); + InstanceType to = instr->to(); + if (from == to) return equal; + if (to == LAST_TYPE) return above_equal; + if (from == FIRST_TYPE) return below_equal; + 
UNREACHABLE(); + return equal; +} + + +void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + __ JumpIfSmi(input, instr->FalseLabel(chunk_)); + } + + __ CmpObjectType(input, TestType(instr->hydrogen()), temp); + EmitBranch(instr, BranchCondition(instr->hydrogen())); +} + + +void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { + Register input = ToRegister(instr->value()); + Register result = ToRegister(instr->result()); + + __ AssertString(input); + + __ mov(result, FieldOperand(input, String::kHashFieldOffset)); + __ IndexFromHash(result, result); +} + + +void LCodeGen::DoHasCachedArrayIndexAndBranch( + LHasCachedArrayIndexAndBranch* instr) { + Register input = ToRegister(instr->value()); + + __ test(FieldOperand(input, String::kHashFieldOffset), + Immediate(String::kContainsCachedArrayIndexMask)); + EmitBranch(instr, equal); +} + + +// Branches to a label or falls through with the answer in the z flag. Trashes +// the temp registers, but not the input. +void LCodeGen::EmitClassOfTest(Label* is_true, + Label* is_false, + Handle<String>class_name, + Register input, + Register temp, + Register temp2) { + DCHECK(!input.is(temp)); + DCHECK(!input.is(temp2)); + DCHECK(!temp.is(temp2)); + __ JumpIfSmi(input, is_false); + + if (String::Equals(isolate()->factory()->Function_string(), class_name)) { + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); + __ j(below, is_false); + __ j(equal, is_true); + __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); + __ j(equal, is_true); + } else { + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ j(above, is_false); + } + + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. + // Check if the constructor in the map is a function. + __ GetMapConstructor(temp, temp, temp2); + // Objects with a non-function constructor have class 'Object'. + __ CmpInstanceType(temp2, JS_FUNCTION_TYPE); + if (String::Equals(class_name, isolate()->factory()->Object_string())) { + __ j(not_equal, is_true); + } else { + __ j(not_equal, is_false); + } + + // temp now contains the constructor function. Grab the + // instance class name from there. + __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); + __ mov(temp, FieldOperand(temp, + SharedFunctionInfo::kInstanceClassNameOffset)); + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. 
This routine isn't expected to work for random API-created + // classes and it doesn't have to because you can't access it with natives + // syntax. Since both sides are internalized it is sufficient to use an + // identity comparison. + __ cmp(temp, class_name); + // End with the answer in the z flag. +} + + +void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + Register temp2 = ToRegister(instr->temp2()); + + Handle<String> class_name = instr->hydrogen()->class_name(); + + EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), + class_name, input, temp, temp2); + + EmitBranch(instr, equal); +} + + +void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { + Register reg = ToRegister(instr->value()); + __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); + EmitBranch(instr, equal); +} + + +void LCodeGen::DoInstanceOf(LInstanceOf* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister())); + DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + InstanceOfStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoHasInPrototypeChainAndBranch( + LHasInPrototypeChainAndBranch* instr) { + Register const object = ToRegister(instr->object()); + Register const object_map = ToRegister(instr->scratch()); + Register const object_prototype = object_map; + Register const prototype = ToRegister(instr->prototype()); + + // The {object} must be a spec object. It's sufficient to know that {object} + // is not a smi, since all other non-spec objects have {null} prototypes and + // will be ruled out below. + if (instr->hydrogen()->ObjectNeedsSmiCheck()) { + __ test(object, Immediate(kSmiTagMask)); + EmitFalseBranch(instr, zero); + } + + // Loop through the {object}s prototype chain looking for the {prototype}. + __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset)); + Label loop; + __ bind(&loop); + __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset)); + __ cmp(object_prototype, prototype); + EmitTrueBranch(instr, equal); + __ cmp(object_prototype, factory()->null_value()); + EmitFalseBranch(instr, equal); + __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset)); + __ jmp(&loop); +} + + +void LCodeGen::DoCmpT(LCmpT* instr) { + Token::Value op = instr->op(); + + Handle<Code> ic = + CodeFactory::CompareIC(isolate(), op, instr->strength()).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + + Condition condition = ComputeCompareCondition(op); + Label true_value, done; + __ test(eax, Operand(eax)); + __ j(condition, &true_value, Label::kNear); + __ mov(ToRegister(instr->result()), factory()->false_value()); + __ jmp(&done, Label::kNear); + __ bind(&true_value); + __ mov(ToRegister(instr->result()), factory()->true_value()); + __ bind(&done); +} + + +void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) { + int extra_value_count = dynamic_frame_alignment ? 
2 : 1; + + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + if (dynamic_frame_alignment && FLAG_debug_code) { + __ cmp(Operand(esp, + (parameter_count + extra_value_count) * kPointerSize), + Immediate(kAlignmentZapValue)); + __ Assert(equal, kExpectedAlignmentMarker); + } + __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); + } else { + DCHECK(info()->IsStub()); // Functions would need to drop one more value. + Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiUntag(reg); + Register return_addr_reg = reg.is(ecx) ? ebx : ecx; + if (dynamic_frame_alignment && FLAG_debug_code) { + DCHECK(extra_value_count == 2); + __ cmp(Operand(esp, reg, times_pointer_size, + extra_value_count * kPointerSize), + Immediate(kAlignmentZapValue)); + __ Assert(equal, kExpectedAlignmentMarker); + } + + // emit code to restore stack based on instr->parameter_count() + __ pop(return_addr_reg); // save return address + if (dynamic_frame_alignment) { + __ inc(reg); // 1 more for alignment + } + __ shl(reg, kPointerSizeLog2); + __ add(esp, reg); + __ jmp(return_addr_reg); + } +} + + +void LCodeGen::DoReturn(LReturn* instr) { + if (FLAG_trace && info()->IsOptimizing()) { + // Preserve the return value on the stack and rely on the runtime call + // to return the value in the same register. We're leaving the code + // managed by the register allocator and tearing down the frame, it's + // safe to write to the context register. + __ push(eax); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntime(Runtime::kTraceExit, 1); + } + if (dynamic_frame_alignment_) { + // Fetch the state of the dynamic frame alignment. + __ mov(edx, Operand(ebp, + JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); + } + if (NeedsEagerFrame()) { + __ mov(esp, ebp); + __ pop(ebp); + } + if (dynamic_frame_alignment_) { + Label no_padding; + __ cmp(edx, Immediate(kNoAlignmentPadding)); + __ j(equal, &no_padding, Label::kNear); + + EmitReturn(instr, true); + __ bind(&no_padding); + } + + EmitReturn(instr, false); +} + + +template <class T> +void LCodeGen::EmitVectorLoadICRegisters(T* instr) { + Register vector_register = ToRegister(instr->temp_vector()); + Register slot_register = LoadWithVectorDescriptor::SlotRegister(); + DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister())); + DCHECK(slot_register.is(eax)); + + AllowDeferredHandleDereference vector_structure_check; + Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); + __ mov(vector_register, vector); + // No need to allocate this register. 
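+  // The slot register is fixed by the calling convention (eax, per the
+  // DCHECK above), so the register allocator never needs to model it; the
+  // code below simply materializes the feedback slot index as a Smi
+  // immediate.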
+ FeedbackVectorSlot slot = instr->hydrogen()->slot(); + int index = vector->GetIndex(slot); + __ mov(slot_register, Immediate(Smi::FromInt(index))); +} + + +template <class T> +void LCodeGen::EmitVectorStoreICRegisters(T* instr) { + Register vector_register = ToRegister(instr->temp_vector()); + Register slot_register = ToRegister(instr->temp_slot()); + + AllowDeferredHandleDereference vector_structure_check; + Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); + __ mov(vector_register, vector); + FeedbackVectorSlot slot = instr->hydrogen()->slot(); + int index = vector->GetIndex(slot); + __ mov(slot_register, Immediate(Smi::FromInt(index))); +} + + +void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->global_object()) + .is(LoadDescriptor::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(LoadDescriptor::NameRegister(), instr->name()); + EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); + Handle<Code> ic = + CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(), + SLOPPY, PREMONOMORPHIC).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ mov(result, ContextOperand(context, instr->slot_index())); + + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(result, factory()->the_hole_value()); + if (instr->hydrogen()->DeoptimizesOnHole()) { + DeoptimizeIf(equal, instr, Deoptimizer::kHole); + } else { + Label is_not_hole; + __ j(not_equal, &is_not_hole, Label::kNear); + __ mov(result, factory()->undefined_value()); + __ bind(&is_not_hole); + } + } +} + + +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register value = ToRegister(instr->value()); + + Label skip_assignment; + + Operand target = ContextOperand(context, instr->slot_index()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(target, factory()->the_hole_value()); + if (instr->hydrogen()->DeoptimizesOnHole()) { + DeoptimizeIf(equal, instr, Deoptimizer::kHole); + } else { + __ j(not_equal, &skip_assignment, Label::kNear); + } + } + + __ mov(target, value); + if (instr->hydrogen()->NeedsWriteBarrier()) { + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + Register temp = ToRegister(instr->temp()); + int offset = Context::SlotOffset(instr->slot_index()); + __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs, + EMIT_REMEMBERED_SET, check_needed); + } + + __ bind(&skip_assignment); +} + + +void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); + + if (access.IsExternalMemory()) { + Register result = ToRegister(instr->result()); + MemOperand operand = instr->object()->IsConstantOperand() + ? 
MemOperand::StaticVariable(ToExternalReference( + LConstantOperand::cast(instr->object()))) + : MemOperand(ToRegister(instr->object()), offset); + __ Load(result, operand, access.representation()); + return; + } + + Register object = ToRegister(instr->object()); + if (instr->hydrogen()->representation().IsDouble()) { + X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); + return; + } + + Register result = ToRegister(instr->result()); + if (!access.IsInobject()) { + __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); + object = result; + } + __ Load(result, FieldOperand(object, offset), access.representation()); +} + + +void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { + DCHECK(!operand->IsDoubleRegister()); + if (operand->IsConstantOperand()) { + Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); + AllowDeferredHandleDereference smi_check; + if (object->IsSmi()) { + __ Push(Handle<Smi>::cast(object)); + } else { + __ PushHeapObject(Handle<HeapObject>::cast(object)); + } + } else if (operand->IsRegister()) { + __ push(ToRegister(operand)); + } else { + __ push(ToOperand(operand)); + } +} + + +void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(LoadDescriptor::NameRegister(), instr->name()); + EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); + Handle<Code> ic = + CodeFactory::LoadICInOptimizedCode( + isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(), + instr->hydrogen()->initialization_state()).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { + Register function = ToRegister(instr->function()); + Register temp = ToRegister(instr->temp()); + Register result = ToRegister(instr->result()); + + // Get the prototype or initial map from the function. + __ mov(result, + FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + + // Check that the function has a prototype or an initial map. + __ cmp(Operand(result), Immediate(factory()->the_hole_value())); + DeoptimizeIf(equal, instr, Deoptimizer::kHole); + + // If the function does not have an initial map, we're done. + Label done; + __ CmpObjectType(result, MAP_TYPE, temp); + __ j(not_equal, &done, Label::kNear); + + // Get the prototype from the initial map. + __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); + + // All done. + __ bind(&done); +} + + +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + +void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { + Register arguments = ToRegister(instr->arguments()); + Register result = ToRegister(instr->result()); + if (instr->length()->IsConstantOperand() && + instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + int index = (const_length - const_index) + 1; + __ mov(result, Operand(arguments, index * kPointerSize)); + } else { + Register length = ToRegister(instr->length()); + Operand index = ToOperand(instr->index()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them add one more. 
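+    // In other words, argument i lives at
+    // arguments + (length - i + 1) * kPointerSize: the sub() below
+    // produces (length - i), and the kPointerSize displacement in the
+    // operand supplies the remaining word.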
+ __ sub(length, index); + __ mov(result, Operand(arguments, length, times_4, kPointerSize)); + } +} + + +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); + LOperand* key = instr->key(); + if (!key->IsConstantOperand() && + ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), + elements_kind)) { + __ SmiUntag(ToRegister(key)); + } + Operand operand(BuildFastArrayOperand( + instr->elements(), + key, + instr->hydrogen()->key()->representation(), + elements_kind, + instr->base_offset())); + if (elements_kind == FLOAT32_ELEMENTS) { + X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); + } else if (elements_kind == FLOAT64_ELEMENTS) { + X87Mov(ToX87Register(instr->result()), operand); + } else { + Register result(ToRegister(instr->result())); + switch (elements_kind) { + case INT8_ELEMENTS: + __ movsx_b(result, operand); + break; + case UINT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + __ movzx_b(result, operand); + break; + case INT16_ELEMENTS: + __ movsx_w(result, operand); + break; + case UINT16_ELEMENTS: + __ movzx_w(result, operand); + break; + case INT32_ELEMENTS: + __ mov(result, operand); + break; + case UINT32_ELEMENTS: + __ mov(result, operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + __ test(result, Operand(result)); + DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); + } + break; + case FLOAT32_ELEMENTS: + case FLOAT64_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { + if (instr->hydrogen()->RequiresHoleCheck()) { + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset() + sizeof(kHoleNanLower32)); + __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr, Deoptimizer::kHole); + } + + Operand double_load_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset()); + X87Mov(ToX87Register(instr->result()), double_load_operand); +} + + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { + Register result = ToRegister(instr->result()); + + // Load the result. + __ mov(result, + BuildFastArrayOperand(instr->elements(), instr->key(), + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, instr->base_offset())); + + // Check for the hole value. + if (instr->hydrogen()->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ test(result, Immediate(kSmiTagMask)); + DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi); + } else { + __ cmp(result, factory()->the_hole_value()); + DeoptimizeIf(equal, instr, Deoptimizer::kHole); + } + } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { + DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); + Label done; + __ cmp(result, factory()->the_hole_value()); + __ j(not_equal, &done); + if (info()->IsStub()) { + // A stub can safely convert the hole to undefined only if the array + // protector cell contains (Smi) Isolate::kArrayProtectorValid. 
Otherwise + // it needs to bail out. + __ mov(result, isolate()->factory()->array_protector()); + __ cmp(FieldOperand(result, PropertyCell::kValueOffset), + Immediate(Smi::FromInt(Isolate::kArrayProtectorValid))); + DeoptimizeIf(not_equal, instr, Deoptimizer::kHole); + } + __ mov(result, isolate()->factory()->undefined_value()); + __ bind(&done); + } +} + + +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_fixed_typed_array()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); + } else { + DoLoadKeyedFixedArray(instr); + } +} + + +Operand LCodeGen::BuildFastArrayOperand( + LOperand* elements_pointer, + LOperand* key, + Representation key_representation, + ElementsKind elements_kind, + uint32_t base_offset) { + Register elements_pointer_reg = ToRegister(elements_pointer); + int element_shift_size = ElementsKindToShiftSize(elements_kind); + int shift_size = element_shift_size; + if (key->IsConstantOperand()) { + int constant_value = ToInteger32(LConstantOperand::cast(key)); + if (constant_value & 0xF0000000) { + Abort(kArrayIndexConstantValueTooBig); + } + return Operand(elements_pointer_reg, + ((constant_value) << shift_size) + + base_offset); + } else { + // Take the tag bit into account while computing the shift size. + if (key_representation.IsSmi() && (shift_size >= 1)) { + shift_size -= kSmiTagSize; + } + ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); + return Operand(elements_pointer_reg, + ToRegister(key), + scale_factor, + base_offset); + } +} + + +void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); + + if (instr->hydrogen()->HasVectorAndSlot()) { + EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); + } + + Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode( + isolate(), instr->hydrogen()->language_mode(), + instr->hydrogen()->initialization_state()).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { + Register result = ToRegister(instr->result()); + + if (instr->hydrogen()->from_inlined()) { + __ lea(result, Operand(esp, -2 * kPointerSize)); + } else { + // Check for arguments adapter frame. + Label done, adapted; + __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(result, Operand(result, StandardFrameConstants::kContextOffset)); + __ cmp(Operand(result), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adapted, Label::kNear); + + // No arguments adaptor frame. + __ mov(result, Operand(ebp)); + __ jmp(&done, Label::kNear); + + // Arguments adaptor frame present. + __ bind(&adapted); + __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Result is the frame pointer for the frame if not adapted and for the real + // frame below the adaptor frame if adapted. + __ bind(&done); + } +} + + +void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { + Operand elem = ToOperand(instr->elements()); + Register result = ToRegister(instr->result()); + + Label done; + + // If no arguments adaptor frame the number of arguments is fixed. + __ cmp(ebp, elem); + __ mov(result, Immediate(scope()->num_parameters())); + __ j(equal, &done, Label::kNear); + + // Arguments adaptor frame present. 
Get argument length from there. + __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(result, Operand(result, + ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiUntag(result); + + // Argument length is in result register. + __ bind(&done); +} + + +void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + + // If the receiver is null or undefined, we have to pass the global + // object as a receiver to normal functions. Values have to be + // passed unchanged to builtins and strict-mode functions. + Label receiver_ok, global_object; + Register scratch = ToRegister(instr->temp()); + + if (!instr->hydrogen()->known_function()) { + // Do not transform the receiver to object for strict mode + // functions. + __ mov(scratch, + FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &receiver_ok); + + // Do not transform the receiver to object for builtins. + __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, &receiver_ok); + } + + // Normal function. Replace undefined or null with global receiver. + __ cmp(receiver, factory()->null_value()); + __ j(equal, &global_object); + __ cmp(receiver, factory()->undefined_value()); + __ j(equal, &global_object); + + // The receiver should be a JS object. + __ test(receiver, Immediate(kSmiTagMask)); + DeoptimizeIf(equal, instr, Deoptimizer::kSmi); + __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch); + DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject); + + __ jmp(&receiver_ok, Label::kNear); + __ bind(&global_object); + __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); + const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); + __ mov(receiver, Operand(receiver, global_offset)); + const int proxy_offset = JSGlobalObject::kGlobalProxyOffset; + __ mov(receiver, FieldOperand(receiver, proxy_offset)); + __ bind(&receiver_ok); +} + + +void LCodeGen::DoApplyArguments(LApplyArguments* instr) { + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + Register length = ToRegister(instr->length()); + Register elements = ToRegister(instr->elements()); + DCHECK(receiver.is(eax)); // Used for parameter count. + DCHECK(function.is(edi)); // Required by InvokeFunction. + DCHECK(ToRegister(instr->result()).is(eax)); + + // Copy the arguments to this function possibly from the + // adaptor frame below it. + const uint32_t kArgumentsLimit = 1 * KB; + __ cmp(length, kArgumentsLimit); + DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments); + + __ push(receiver); + __ mov(receiver, length); + + // Loop through the arguments pushing them onto the execution + // stack. + Label invoke, loop; + // length is a small non-negative integer, due to the test above. + __ test(length, Operand(length)); + __ j(zero, &invoke, Label::kNear); + __ bind(&loop); + __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); + __ dec(length); + __ j(not_zero, &loop); + + // Invoke the function. 
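+  // |receiver| is eax (see the DCHECK above) and was overwritten with
+  // |length|, so eax now holds the actual argument count that the
+  // ParameterCount below passes to InvokeFunction.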
+ __ bind(&invoke); + DCHECK(instr->HasPointerMap()); + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator safepoint_generator( + this, pointers, Safepoint::kLazyDeopt); + ParameterCount actual(eax); + __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); +} + + +void LCodeGen::DoDebugBreak(LDebugBreak* instr) { + __ int3(); +} + + +void LCodeGen::DoPushArgument(LPushArgument* instr) { + LOperand* argument = instr->value(); + EmitPushTaggedOperand(argument); +} + + +void LCodeGen::DoDrop(LDrop* instr) { + __ Drop(instr->count()); +} + + +void LCodeGen::DoThisFunction(LThisFunction* instr) { + Register result = ToRegister(instr->result()); + __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); +} + + +void LCodeGen::DoContext(LContext* instr) { + Register result = ToRegister(instr->result()); + if (info()->IsOptimizing()) { + __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); + } else { + // If there is no frame, the context must be in esi. + DCHECK(result.is(esi)); + } +} + + +void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + __ push(Immediate(instr->hydrogen()->pairs())); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); + CallRuntime(Runtime::kDeclareGlobals, 2, instr); +} + + +void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, + LInstruction* instr) { + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; + + Register function_reg = edi; + + if (can_invoke_directly) { + // Change context. + __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset)); + + // Always initialize eax to the number of actual arguments. + __ mov(eax, arity); + + // Invoke function directly. + if (function.is_identical_to(info()->closure())) { + __ CallSelf(); + } else { + __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset)); + } + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + } else { + // We need to adapt arguments. 
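+    // Presumably this path goes through the arguments adaptor trampoline,
+    // which pads missing parameters with undefined (or drops extras)
+    // before entering the function, rather than jumping to the code entry
+    // directly as in the branch above.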
+ LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator( + this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(arity); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator); + } +} + + +void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { + DCHECK(ToRegister(instr->result()).is(eax)); + + if (instr->hydrogen()->IsTailCall()) { + if (NeedsEagerFrame()) __ leave(); + + if (instr->target()->IsConstantOperand()) { + LConstantOperand* target = LConstantOperand::cast(instr->target()); + Handle<Code> code = Handle<Code>::cast(ToHandle(target)); + __ jmp(code, RelocInfo::CODE_TARGET); + } else { + DCHECK(instr->target()->IsRegister()); + Register target = ToRegister(instr->target()); + __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(target); + } + } else { + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + + if (instr->target()->IsConstantOperand()) { + LConstantOperand* target = LConstantOperand::cast(instr->target()); + Handle<Code> code = Handle<Code>::cast(ToHandle(target)); + generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); + __ call(code, RelocInfo::CODE_TARGET); + } else { + DCHECK(instr->target()->IsRegister()); + Register target = ToRegister(instr->target()); + generator.BeforeCall(__ CallSize(Operand(target))); + __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ call(target); + } + generator.AfterCall(); + } +} + + +void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); + + __ mov(eax, instr->arity()); + + // Change context. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + bool is_self_call = false; + if (instr->hydrogen()->function()->IsConstant()) { + HConstant* fun_const = HConstant::cast(instr->hydrogen()->function()); + Handle<JSFunction> jsfun = + Handle<JSFunction>::cast(fun_const->handle(isolate())); + is_self_call = jsfun.is_identical_to(info()->closure()); + } + + if (is_self_call) { + __ CallSelf(); + } else { + __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); + } + + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); +} + + +void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { + Register input_reg = ToRegister(instr->value()); + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); + + Label slow, allocated, done; + Register tmp = input_reg.is(eax) ? ecx : eax; + Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx; + + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + // Check the sign of the argument. If the argument is positive, just + // return it. We do not need to patch the stack since |input| and + // |result| are the same register and |input| will be restored + // unchanged by popping safepoint registers. + __ test(tmp, Immediate(HeapNumber::kSignMask)); + __ j(zero, &done, Label::kNear); + + __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); + __ jmp(&allocated, Label::kNear); + + // Slow case: Call the runtime system to do the number allocation. 
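+  // A fresh HeapNumber is needed because heap numbers may be shared:
+  // clearing the sign bit of |input| in place could be observed through
+  // other references. The code after &allocated therefore copies the
+  // payload into the new number with the sign bit masked off.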
+  __ bind(&slow);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+                          instr, instr->context());
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(eax)) __ mov(tmp, eax);
+  // Restore input_reg after call to runtime.
+  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+  __ bind(&allocated);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ and_(tmp2, ~HeapNumber::kSignMask);
+  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+  __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ test(input_reg, Operand(input_reg));
+  Label is_positive;
+  __ j(not_sign, &is_positive, Label::kNear);
+  __ neg(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+  __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LMathAbs* instr,
+                                    const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  DCHECK(instr->value()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    X87Register value = ToX87Register(instr->value());
+    X87Fxch(value);
+    __ fabs();
+  } else if (r.IsSmiOrInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
+    Register input_reg = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  Register output_reg = ToRegister(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label not_minus_zero, done;
+  // Deoptimize on unordered.
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+  __ j(below, &not_minus_zero, Label::kNear);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Check for negative zero.
+    __ j(not_equal, &not_minus_zero, Label::kNear);
+    // +- 0.0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    __ Move(output_reg, Immediate(0));
+    __ jmp(&done, Label::kFar);
+  }
+
+  // Positive input.
+  // rc=01B, round down.
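+  // Rounding control is bits 10-11 of the x87 control word: 0x0400 selects
+  // RC = 01B (round toward -infinity), so the fist_s below effectively
+  // computes floor(); 0x0000 afterwards restores round-to-nearest.
+  // X87CheckIA presumably tests the invalid-operation flag that fist_s
+  // raises for values outside the int32 range, feeding the overflow deopt.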
+  __ bind(&not_minus_zero);
+  __ fnclex();
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fist_s(Operand(esp, 0));
+  __ pop(output_reg);
+  __ X87CheckIA();
+  DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
+  __ fnclex();
+  __ X87SetRC(0x0000);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  Register result = ToRegister(instr->result());
+  X87Fxch(input_reg);
+  Label below_one_half, below_minus_one_half, done;
+
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  ExternalReference minus_one_half =
+      ExternalReference::address_of_minus_one_half();
+
+  __ fld_d(Operand::StaticVariable(one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_one_half);
+
+  // Round toward zero: since 0.5 <= x here, truncating x + 0.5 computes
+  // floor(x + 0.5), which is the rounded result.
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=11B, round toward zero.
+  __ X87SetRC(0x0c00);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+  __ jmp(&done);
+
+  __ bind(&below_one_half);
+  __ fld_d(Operand::StaticVariable(minus_one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_minus_one_half);
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+  }
+  __ Move(result, Immediate(0));
+  __ jmp(&done);
+
+  __ bind(&below_minus_one_half);
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=01B, round down.
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fstp_s(MemOperand(esp, 0));
+  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
+  __ add(esp, Immediate(kPointerSize));
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  X87Register input = ToX87Register(instr->value());
+  X87Register result_reg = ToX87Register(instr->result());
+  Register temp_result = ToRegister(instr->temp1());
+  Register temp = ToRegister(instr->temp2());
+  Label slow, done, smi, finish;
+  DCHECK(result_reg.is(input));
+
+  // Store the input in a heap number and call Runtime::kMathSqrt on it.
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(temp_result, Immediate(0));
+
+    // Preserve the value of all registers.
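+    // The scope below spills every register to a safepoint slot so the GC
+    // can relocate heap pointers during the allocation; temp_result is then
+    // patched via its slot rather than in the clobbered register itself.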
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  __ bind(&done);
+  X87LoadForUsage(input);
+  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+  {
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ push(temp_result);
+    __ CallRuntimeSaveDoubles(Runtime::kMathSqrt);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  X87PrepareToWrite(result_reg);
+  // The return value of Runtime::kMathSqrt is a Smi or a HeapNumber.
+  __ JumpIfSmi(temp_result, &smi);
+  // Heap number(double)
+  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+  __ jmp(&finish);
+  // SMI
+  __ bind(&smi);
+  __ SmiUntag(temp_result);
+  __ push(temp_result);
+  __ fild_s(MemOperand(esp, 0));
+  __ pop(temp_result);
+  __ bind(&finish);
+  X87CommitWrite(result_reg);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  DCHECK(ToX87Register(instr->result()).is(input_reg));
+  X87Fxch(input_reg);
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
+  __ fxam();
+  __ push(eax);
+  __ fnstsw_ax();
+  __ and_(eax, Immediate(0x4700));
+  __ cmp(eax, Immediate(0x0700));
+  __ j(not_equal, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ fchs();
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
+  __ fldz();
+  __ faddp();  // Convert -0 to +0.
+  __ fsqrt();
+  __ bind(&done);
+  __ pop(eax);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  X87Register result = ToX87Register(instr->result());
+  // Having marked this as a call, we can use any registers.
+  X87Register base = ToX87Register(instr->left());
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+
+  if (exponent_type.IsSmi()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else if (exponent_type.IsTagged()) {
+    Register exponent = ToRegister(instr->right());
+    Register temp = exponent.is(ecx) ? eax : ecx;
+    Label no_deopt, done;
+    X87LoadForUsage(base);
+    __ JumpIfSmi(exponent, &no_deopt);
+    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    // Heap number(double)
+    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
+    __ jmp(&done);
+    // SMI
+    __ bind(&no_deopt);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+    __ bind(&done);
+  } else if (exponent_type.IsInteger32()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    X87Register exponent_double = ToX87Register(instr->right());
+    X87LoadForUsage(base, exponent_double);
+  }
+
+  // FP data stack {base, exponent(TOS)}.
+  // Handle (exponent==+-0.5 && base == -0).
+  Label not_plus_0;
+  __ fld(0);
+  __ fabs();
+  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
+  __ FCmp();
+  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
+  __ j(not_equal, &not_plus_0, Label::kNear);
+  __ fldz();
+  // FP data stack {base, exponent(TOS), zero}.
+  __ faddp(2);
+  __ bind(&not_plus_0);
+
+  {
+    __ PrepareCallCFunction(4, eax);
+    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
+    __ fstp_d(MemOperand(esp, 0));            // Base value.
+    X87PrepareToWrite(result);
+    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+                     4);
+    // Return value is in st(0) on ia32.
+    X87CommitWrite(result);
+  }
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  DCHECK(instr->value()->Equals(instr->result()));
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label positive, done, zero, nan_result;
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  __ j(below, &nan_result, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  // Positive input.
+  // {input, ln2}.
+  __ fldln2();
+  // {ln2, input}.
+  __ fxch();
+  // {result}.
+  __ fyl2x();
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&nan_result);
+  X87PrepareToWrite(input_reg);
+  __ push(Immediate(0xffffffff));
+  __ push(Immediate(0x7fffffff));
+  __ fld_d(MemOperand(esp, 0));
+  __ lea(esp, Operand(esp, kDoubleSize));
+  X87CommitWrite(input_reg);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&zero);
+  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
+  X87PrepareToWrite(input_reg);
+  __ fld_d(Operand::StaticVariable(ninf));
+  X87CommitWrite(input_reg);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ Lzcnt(result, input);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  X87Register input = ToX87Register(instr->value());
+  X87Register result_reg = ToX87Register(instr->result());
+  Register temp_result = ToRegister(instr->temp1());
+  Register temp = ToRegister(instr->temp2());
+  Label slow, done, smi, finish;
+  DCHECK(result_reg.is(input));
+
+  // Store the input in a heap number and call Runtime::kMathExpRT on it.
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+ __ Move(temp_result, Immediate(0)); + + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters(instr->pointer_map(), 0, + Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(temp_result, eax); + } + __ bind(&done); + X87LoadForUsage(input); + __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset)); + + { + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ push(temp_result); + __ CallRuntimeSaveDoubles(Runtime::kMathExpRT); + RecordSafepointWithRegisters(instr->pointer_map(), 1, + Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(temp_result, eax); + } + X87PrepareToWrite(result_reg); + // return value of MathExpRT is Smi or Heap Number. + __ JumpIfSmi(temp_result, &smi); + // Heap number(double) + __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset)); + __ jmp(&finish); + // SMI + __ bind(&smi); + __ SmiUntag(temp_result); + __ push(temp_result); + __ fild_s(MemOperand(esp, 0)); + __ pop(temp_result); + __ bind(&finish); + X87CommitWrite(result_reg); +} + + +void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(instr->HasPointerMap()); + + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { + LPointerMap* pointers = instr->pointer_map(); + SafepointGenerator generator( + this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(instr->arity()); + __ InvokeFunction(edi, count, CALL_FUNCTION, generator); + } else { + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), + instr->arity(), instr); + } +} + + +void LCodeGen::DoCallFunction(LCallFunction* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->function()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); + + int arity = instr->arity(); + ConvertReceiverMode mode = instr->hydrogen()->convert_mode(); + if (instr->hydrogen()->HasVectorAndSlot()) { + Register slot_register = ToRegister(instr->temp_slot()); + Register vector_register = ToRegister(instr->temp_vector()); + DCHECK(slot_register.is(edx)); + DCHECK(vector_register.is(ebx)); + + AllowDeferredHandleDereference vector_structure_check; + Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); + int index = vector->GetIndex(instr->hydrogen()->slot()); + + __ mov(vector_register, vector); + __ mov(slot_register, Immediate(Smi::FromInt(index))); + + Handle<Code> ic = + CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + } else { + __ Set(eax, arity); + CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr); + } +} + + +void LCodeGen::DoCallNew(LCallNew* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->constructor()).is(edi)); + DCHECK(ToRegister(instr->result()).is(eax)); + + // No cell in ebx for construct type feedback in optimized code + __ mov(ebx, isolate()->factory()->undefined_value()); + CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); + __ Move(eax, Immediate(instr->arity())); + CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void 
LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ Move(eax, Immediate(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site when we have a length argument.
+    // The case may bail out to the runtime, which will determine the correct
+    // elements kind with the site.
+    __ mov(ebx, instr->hydrogen()->site());
+  } else {
+    __ mov(ebx, isolate()->factory()->undefined_value());
+  }
+
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need the holey variant of this elements kind; look at the
+      // first (length) argument to decide.
+      __ mov(ecx, Operand(esp, 0));
+      __ test(ecx, ecx);
+      __ j(zero, &packed_case, Label::kNear);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+      __ jmp(&done, Label::kNear);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ lea(result, Operand(base, ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ lea(result, Operand(base, offset, times_1, 0));
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->hydrogen()->field_representation();
+
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    MemOperand operand = instr->object()->IsConstantOperand() ?
MemOperand::StaticVariable( + ToExternalReference(LConstantOperand::cast(instr->object()))) + : MemOperand(ToRegister(instr->object()), offset); + if (instr->value()->IsConstantOperand()) { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + __ mov(operand, Immediate(ToInteger32(operand_value))); + } else { + Register value = ToRegister(instr->value()); + __ Store(value, operand, representation); + } + return; + } + + Register object = ToRegister(instr->object()); + __ AssertNotSmi(object); + DCHECK(!representation.IsSmi() || + !instr->value()->IsConstantOperand() || + IsSmi(LConstantOperand::cast(instr->value()))); + if (representation.IsDouble()) { + DCHECK(access.IsInobject()); + DCHECK(!instr->hydrogen()->has_transition()); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + X87Register value = ToX87Register(instr->value()); + X87Mov(FieldOperand(object, offset), value); + return; + } + + if (instr->hydrogen()->has_transition()) { + Handle<Map> transition = instr->hydrogen()->transition_map(); + AddDeprecationDependency(transition); + __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); + if (instr->hydrogen()->NeedsWriteBarrierForMap()) { + Register temp = ToRegister(instr->temp()); + Register temp_map = ToRegister(instr->temp_map()); + __ mov(temp_map, transition); + __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); + // Update the write barrier for the map field. + __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs); + } + } + + // Do the store. + Register write_register = object; + if (!access.IsInobject()) { + write_register = ToRegister(instr->temp()); + __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); + } + + MemOperand operand = FieldOperand(write_register, offset); + if (instr->value()->IsConstantOperand()) { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (operand_value->IsRegister()) { + Register value = ToRegister(operand_value); + __ Store(value, operand, representation); + } else if (representation.IsInteger32() || representation.IsExternal()) { + Immediate immediate = ToImmediate(operand_value, representation); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + __ mov(operand, immediate); + } else { + Handle<Object> handle_value = ToHandle(operand_value); + DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); + __ mov(operand, handle_value); + } + } else { + Register value = ToRegister(instr->value()); + __ Store(value, operand, representation); + } + + if (instr->hydrogen()->NeedsWriteBarrier()) { + Register value = ToRegister(instr->value()); + Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; + // Update the write barrier for the object for in-object properties. 
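+    // RecordWriteField adds the written slot to the remembered set and tells
+    // the incremental marker about the stored value, so the GC never misses
+    // the new pointer; the smi check is omitted when the value is statically
+    // known to be a heap object.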
+ __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs, + EMIT_REMEMBERED_SET, + instr->hydrogen()->SmiCheckForWriteBarrier(), + instr->hydrogen()->PointersToHereCheckForValue()); + } +} + + +void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); + DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); + + if (instr->hydrogen()->HasVectorAndSlot()) { + EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr); + } + + __ mov(StoreDescriptor::NameRegister(), instr->name()); + Handle<Code> ic = CodeFactory::StoreICInOptimizedCode( + isolate(), instr->language_mode(), + instr->hydrogen()->initialization_state()).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal; + if (instr->index()->IsConstantOperand()) { + __ cmp(ToOperand(instr->length()), + ToImmediate(LConstantOperand::cast(instr->index()), + instr->hydrogen()->length()->representation())); + cc = CommuteCondition(cc); + } else if (instr->length()->IsConstantOperand()) { + __ cmp(ToOperand(instr->index()), + ToImmediate(LConstantOperand::cast(instr->length()), + instr->hydrogen()->index()->representation())); + } else { + __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); + } + if (FLAG_debug_code && instr->hydrogen()->skip_check()) { + Label done; + __ j(NegateCondition(cc), &done, Label::kNear); + __ int3(); + __ bind(&done); + } else { + DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); + } +} + + +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); + LOperand* key = instr->key(); + if (!key->IsConstantOperand() && + ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), + elements_kind)) { + __ SmiUntag(ToRegister(key)); + } + Operand operand(BuildFastArrayOperand( + instr->elements(), + key, + instr->hydrogen()->key()->representation(), + elements_kind, + instr->base_offset())); + if (elements_kind == FLOAT32_ELEMENTS) { + X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand); + } else if (elements_kind == FLOAT64_ELEMENTS) { + uint64_t int_val = kHoleNanInt64; + int32_t lower = static_cast<int32_t>(int_val); + int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); + Operand operand2 = BuildFastArrayOperand( + instr->elements(), instr->key(), + instr->hydrogen()->key()->representation(), elements_kind, + instr->base_offset() + kPointerSize); + + Label no_special_nan_handling, done; + X87Register value = ToX87Register(instr->value()); + X87Fxch(value); + __ lea(esp, Operand(esp, -kDoubleSize)); + __ fst_d(MemOperand(esp, 0)); + __ lea(esp, Operand(esp, kDoubleSize)); + int offset = sizeof(kHoleNanUpper32); + // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff), + // so we check the upper with 0xffffffff for hole as a temporary fix. 
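+    // fst_d quiets the signaling hole NaN by setting mantissa bit 51, which
+    // turns the upper word 0xfff7ffff into 0xffffffff; when that pattern is
+    // seen, the canonical hole bits are written with two integer stores so
+    // the hole value survives the FPU round-trip.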
+ __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff)); + __ j(not_equal, &no_special_nan_handling, Label::kNear); + __ mov(operand, Immediate(lower)); + __ mov(operand2, Immediate(upper)); + __ jmp(&done, Label::kNear); + + __ bind(&no_special_nan_handling); + __ fst_d(operand); + __ bind(&done); + } else { + Register value = ToRegister(instr->value()); + switch (elements_kind) { + case UINT8_ELEMENTS: + case INT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + __ mov_b(operand, value); + break; + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + __ mov_w(operand, value); + break; + case UINT32_ELEMENTS: + case INT32_ELEMENTS: + __ mov(operand, value); + break; + case FLOAT32_ELEMENTS: + case FLOAT64_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case FAST_SLOPPY_ARGUMENTS_ELEMENTS: + case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + Operand double_store_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + instr->base_offset()); + + uint64_t int_val = kHoleNanInt64; + int32_t lower = static_cast<int32_t>(int_val); + int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); + Operand double_store_operand2 = BuildFastArrayOperand( + instr->elements(), instr->key(), + instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS, + instr->base_offset() + kPointerSize); + + if (instr->hydrogen()->IsConstantHoleStore()) { + // This means we should store the (double) hole. No floating point + // registers required. + __ mov(double_store_operand, Immediate(lower)); + __ mov(double_store_operand2, Immediate(upper)); + } else { + Label no_special_nan_handling, done; + X87Register value = ToX87Register(instr->value()); + X87Fxch(value); + + if (instr->NeedsCanonicalization()) { + __ fld(0); + __ fld(0); + __ FCmp(); + __ j(parity_odd, &no_special_nan_handling, Label::kNear); + // All NaNs are Canonicalized to 0x7fffffffffffffff + __ mov(double_store_operand, Immediate(0xffffffff)); + __ mov(double_store_operand2, Immediate(0x7fffffff)); + __ jmp(&done, Label::kNear); + } else { + __ lea(esp, Operand(esp, -kDoubleSize)); + __ fst_d(MemOperand(esp, 0)); + __ lea(esp, Operand(esp, kDoubleSize)); + int offset = sizeof(kHoleNanUpper32); + // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff), + // so we check the upper with 0xffffffff for hole as a temporary fix. + __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff)); + __ j(not_equal, &no_special_nan_handling, Label::kNear); + __ mov(double_store_operand, Immediate(lower)); + __ mov(double_store_operand2, Immediate(upper)); + __ jmp(&done, Label::kNear); + } + __ bind(&no_special_nan_handling); + __ fst_d(double_store_operand); + __ bind(&done); + } +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { + Register elements = ToRegister(instr->elements()); + Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; + + Operand operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, + instr->base_offset()); + if (instr->value()->IsRegister()) { + __ mov(operand, ToRegister(instr->value())); + } else { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (IsSmi(operand_value)) { + Immediate immediate = ToImmediate(operand_value, Representation::Smi()); + __ mov(operand, immediate); + } else { + DCHECK(!IsInteger32(operand_value)); + Handle<Object> handle_value = ToHandle(operand_value); + __ mov(operand, handle_value); + } + } + + if (instr->hydrogen()->NeedsWriteBarrier()) { + DCHECK(instr->value()->IsRegister()); + Register value = ToRegister(instr->value()); + DCHECK(!instr->key()->IsConstantOperand()); + SmiCheck check_needed = + instr->hydrogen()->value()->type().IsHeapObject() + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. + __ lea(key, operand); + __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET, + check_needed, + instr->hydrogen()->PointersToHereCheckForValue()); + } +} + + +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases...external, fast-double, fast + if (instr->is_fixed_typed_array()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); + } +} + + +void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); + DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); + DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); + + if (instr->hydrogen()->HasVectorAndSlot()) { + EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr); + } + + Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( + isolate(), instr->language_mode(), + instr->hydrogen()->initialization_state()).code(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + Label no_memento_found; + __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); + DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound); + __ bind(&no_memento_found); +} + + +void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { + class DeferredMaybeGrowElements final : public LDeferredCode { + public: + DeferredMaybeGrowElements(LCodeGen* codegen, + LMaybeGrowElements* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) {} + void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } + LInstruction* instr() override { return instr_; } + + private: + LMaybeGrowElements* instr_; + }; + + Register result = eax; + DeferredMaybeGrowElements* deferred = + new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_); + LOperand* key = instr->key(); + LOperand* current_capacity = instr->current_capacity(); + + DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); + DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); + DCHECK(key->IsConstantOperand() || key->IsRegister()); + DCHECK(current_capacity->IsConstantOperand() || + 
current_capacity->IsRegister()); + + if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { + int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); + int32_t constant_capacity = + ToInteger32(LConstantOperand::cast(current_capacity)); + if (constant_key >= constant_capacity) { + // Deferred case. + __ jmp(deferred->entry()); + } + } else if (key->IsConstantOperand()) { + int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); + __ cmp(ToOperand(current_capacity), Immediate(constant_key)); + __ j(less_equal, deferred->entry()); + } else if (current_capacity->IsConstantOperand()) { + int32_t constant_capacity = + ToInteger32(LConstantOperand::cast(current_capacity)); + __ cmp(ToRegister(key), Immediate(constant_capacity)); + __ j(greater_equal, deferred->entry()); + } else { + __ cmp(ToRegister(key), ToRegister(current_capacity)); + __ j(greater_equal, deferred->entry()); + } + + __ mov(result, ToOperand(instr->elements())); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + Register result = eax; + __ Move(result, Immediate(0)); + + // We have to call a stub. + { + PushSafepointRegistersScope scope(this); + if (instr->object()->IsRegister()) { + __ Move(result, ToRegister(instr->object())); + } else { + __ mov(result, ToOperand(instr->object())); + } + + LOperand* key = instr->key(); + if (key->IsConstantOperand()) { + __ mov(ebx, ToImmediate(key, Representation::Smi())); + } else { + __ Move(ebx, ToRegister(key)); + __ SmiTag(ebx); + } + + GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), + instr->hydrogen()->kind()); + __ CallStub(&stub); + RecordSafepointWithLazyDeopt( + instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + __ StoreToSafepointRegisterSlot(result, result); + } + + // Deopt on smi, which means the elements array changed to dictionary mode. + __ test(result, Immediate(kSmiTagMask)); + DeoptimizeIf(equal, instr, Deoptimizer::kSmi); +} + + +void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { + Register object_reg = ToRegister(instr->object()); + + Handle<Map> from_map = instr->original_map(); + Handle<Map> to_map = instr->transitioned_map(); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); + + Label not_applicable; + bool is_simple_map_transition = + IsSimpleMapChangeTransition(from_kind, to_kind); + Label::Distance branch_distance = + is_simple_map_transition ? Label::kNear : Label::kFar; + __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); + __ j(not_equal, ¬_applicable, branch_distance); + if (is_simple_map_transition) { + Register new_map_reg = ToRegister(instr->new_map_temp()); + __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), + Immediate(to_map)); + // Write barrier. 
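+    // Even a simple map transition stores a fresh pointer into the object,
+    // so the incremental marker still has to hear about it; RecordWriteForMap
+    // below does the remembered-set bookkeeping for the map slot.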
+    DCHECK_NOT_NULL(instr->temp());
+    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+                         ToRegister(instr->temp()), kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(esi));
+    DCHECK(object_reg.is(eax));
+    PushSafepointRegistersScope scope(this);
+    __ mov(ebx, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(instr,
+        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen,
+                             LStringCharCodeAt* instr,
+                             const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
+
+  StringCharLoadGenerator::Generate(masm(),
+                                    factory(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (instr->index()->IsConstantOperand()) { + Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()), + Representation::Smi()); + __ push(immediate); + } else { + Register index = ToRegister(instr->index()); + __ SmiTag(index); + __ push(index); + } + CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, + instr, instr->context()); + __ AssertSmi(eax); + __ SmiUntag(eax); + __ StoreToSafepointRegisterSlot(result, eax); +} + + +void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { + class DeferredStringCharFromCode final : public LDeferredCode { + public: + DeferredStringCharFromCode(LCodeGen* codegen, + LStringCharFromCode* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + void Generate() override { + codegen()->DoDeferredStringCharFromCode(instr_); + } + LInstruction* instr() override { return instr_; } + + private: + LStringCharFromCode* instr_; + }; + + DeferredStringCharFromCode* deferred = + new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); + + DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); + Register char_code = ToRegister(instr->char_code()); + Register result = ToRegister(instr->result()); + DCHECK(!char_code.is(result)); + + __ cmp(char_code, String::kMaxOneByteCharCode); + __ j(above, deferred->entry()); + __ Move(result, Immediate(factory()->single_character_string_cache())); + __ mov(result, FieldOperand(result, + char_code, times_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(result, factory()->undefined_value()); + __ j(equal, deferred->entry()); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { + Register char_code = ToRegister(instr->char_code()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. 
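+  // Smi::FromInt(0) is encoded as the machine word 0, so this zero immediate
+  // is a valid tagged value for the GC to scan while the runtime call runs.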
+ __ Move(result, Immediate(0)); + + PushSafepointRegistersScope scope(this); + __ SmiTag(char_code); + __ push(char_code); + CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr, + instr->context()); + __ StoreToSafepointRegisterSlot(result, eax); +} + + +void LCodeGen::DoStringAdd(LStringAdd* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->left()).is(edx)); + DCHECK(ToRegister(instr->right()).is(eax)); + StringAddStub stub(isolate(), + instr->hydrogen()->flags(), + instr->hydrogen()->pretenure_flag()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { + LOperand* input = instr->value(); + LOperand* output = instr->result(); + DCHECK(input->IsRegister() || input->IsStackSlot()); + DCHECK(output->IsDoubleRegister()); + if (input->IsRegister()) { + Register input_reg = ToRegister(input); + __ push(input_reg); + X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); + __ pop(input_reg); + } else { + X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); + } +} + + +void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + LOperand* input = instr->value(); + LOperand* output = instr->result(); + X87Register res = ToX87Register(output); + X87PrepareToWrite(res); + __ LoadUint32NoSSE2(ToRegister(input)); + X87CommitWrite(res); +} + + +void LCodeGen::DoNumberTagI(LNumberTagI* instr) { + class DeferredNumberTagI final : public LDeferredCode { + public: + DeferredNumberTagI(LCodeGen* codegen, + LNumberTagI* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + void Generate() override { + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), + SIGNED_INT32); + } + LInstruction* instr() override { return instr_; } + + private: + LNumberTagI* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister() && input->Equals(instr->result())); + Register reg = ToRegister(input); + + DeferredNumberTagI* deferred = + new(zone()) DeferredNumberTagI(this, instr, x87_stack_); + __ SmiTag(reg); + __ j(overflow, deferred->entry()); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoNumberTagU(LNumberTagU* instr) { + class DeferredNumberTagU final : public LDeferredCode { + public: + DeferredNumberTagU(LCodeGen* codegen, + LNumberTagU* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + void Generate() override { + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), + UNSIGNED_INT32); + } + LInstruction* instr() override { return instr_; } + + private: + LNumberTagU* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister() && input->Equals(instr->result())); + Register reg = ToRegister(input); + + DeferredNumberTagU* deferred = + new(zone()) DeferredNumberTagU(this, instr, x87_stack_); + __ cmp(reg, Immediate(Smi::kMaxValue)); + __ j(above, deferred->entry()); + __ SmiTag(reg); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, + LOperand* value, + LOperand* temp, + IntegerSignedness signedness) { + Label done, slow; + Register reg = ToRegister(value); + Register tmp = ToRegister(temp); + + if (signedness == SIGNED_INT32) { + // There was overflow, so bits 30 and 31 of the original integer + // disagree. Try to allocate a heap number in new space and store + // the value in there. If that fails, call the runtime system. 
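+    // SmiTag overflowed, so after SmiUntag the register holds the original
+    // value with bit 31 replaced by bit 30; since those two bits disagree,
+    // flipping bit 31 with the xor recovers the exact 32-bit input.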
+ __ SmiUntag(reg); + __ xor_(reg, 0x80000000); + __ push(reg); + __ fild_s(Operand(esp, 0)); + __ pop(reg); + } else { + // There's no fild variant for unsigned values, so zero-extend to a 64-bit + // int manually. + __ push(Immediate(0)); + __ push(reg); + __ fild_d(Operand(esp, 0)); + __ pop(reg); + __ pop(reg); + } + + if (FLAG_inline_new) { + __ AllocateHeapNumber(reg, tmp, no_reg, &slow); + __ jmp(&done, Label::kNear); + } + + // Slow case: Call the runtime system to do the number allocation. + __ bind(&slow); + { + // TODO(3095996): Put a valid pointer value in the stack slot where the + // result register is stored, as this register is in the pointer map, but + // contains an integer value. + __ Move(reg, Immediate(0)); + + // Preserve the value of all registers. + PushSafepointRegistersScope scope(this); + + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(reg, eax); + } + + __ bind(&done); + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); +} + + +void LCodeGen::DoNumberTagD(LNumberTagD* instr) { + class DeferredNumberTagD final : public LDeferredCode { + public: + DeferredNumberTagD(LCodeGen* codegen, + LNumberTagD* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } + LInstruction* instr() override { return instr_; } + + private: + LNumberTagD* instr_; + }; + + Register reg = ToRegister(instr->result()); + + // Put the value to the top of stack + X87Register src = ToX87Register(instr->value()); + // Don't use X87LoadForUsage here, which is only used by Instruction which + // clobbers fp registers. + x87_stack_.Fxch(src); + + DeferredNumberTagD* deferred = + new(zone()) DeferredNumberTagD(this, instr, x87_stack_); + if (FLAG_inline_new) { + Register tmp = ToRegister(instr->temp()); + __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); + } else { + __ jmp(deferred->entry()); + } + __ bind(deferred->exit()); + __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); +} + + +void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + Register reg = ToRegister(instr->result()); + __ Move(reg, Immediate(0)); + + PushSafepointRegistersScope scope(this); + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. 
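+  // esi is the context register in the ia32 calling convention, so the
+  // frame's context slot has to be (re)loaded into it before the call.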
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(reg, eax); +} + + +void LCodeGen::DoSmiTag(LSmiTag* instr) { + HChange* hchange = instr->hydrogen(); + Register input = ToRegister(instr->value()); + if (hchange->CheckFlag(HValue::kCanOverflow) && + hchange->value()->CheckFlag(HValue::kUint32)) { + __ test(input, Immediate(0xc0000000)); + DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow); + } + __ SmiTag(input); + if (hchange->CheckFlag(HValue::kCanOverflow) && + !hchange->value()->CheckFlag(HValue::kUint32)) { + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); + } +} + + +void LCodeGen::DoSmiUntag(LSmiUntag* instr) { + LOperand* input = instr->value(); + Register result = ToRegister(input); + DCHECK(input->IsRegister() && input->Equals(instr->result())); + if (instr->needs_check()) { + __ test(result, Immediate(kSmiTagMask)); + DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi); + } else { + __ AssertSmi(result); + } + __ SmiUntag(result); +} + + +void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg, + Register temp_reg, X87Register res_reg, + NumberUntagDMode mode) { + bool can_convert_undefined_to_nan = + instr->hydrogen()->can_convert_undefined_to_nan(); + bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); + + Label load_smi, done; + + X87PrepareToWrite(res_reg); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ JumpIfSmi(input_reg, &load_smi); + + // Heap number map check. + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + if (!can_convert_undefined_to_nan) { + DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); + } else { + Label heap_number, convert; + __ j(equal, &heap_number); + + // Convert undefined (or hole) to NaN. + __ cmp(input_reg, factory()->undefined_value()); + DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); + + __ bind(&convert); + __ push(Immediate(0xffffffff)); + __ push(Immediate(0x7fffffff)); + __ fld_d(MemOperand(esp, 0)); + __ lea(esp, Operand(esp, kDoubleSize)); + __ jmp(&done, Label::kNear); + + __ bind(&heap_number); + } + // Heap number to x87 conversion. + __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + __ fldz(); + __ FCmp(); + __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ j(not_zero, &done, Label::kNear); + + // Use general purpose registers to check if we have -0.0 + __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); + __ test(temp_reg, Immediate(HeapNumber::kSignMask)); + __ j(zero, &done, Label::kNear); + + // Pop FPU stack before deoptimizing. + __ fstp(0); + DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); + } + __ jmp(&done, Label::kNear); + } else { + DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); + } + + __ bind(&load_smi); + // Clobbering a temp is faster than re-tagging the + // input register since we avoid dependencies. + __ mov(temp_reg, input_reg); + __ SmiUntag(temp_reg); // Untag smi before converting to float. 
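+  // fild_s only accepts a memory operand, so the untagged integer is bounced
+  // through the stack to get it onto the FPU.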
+  __ push(temp_reg);
+  __ fild_s(Operand(esp, 0));
+  __ add(esp, Immediate(kPointerSize));
+  __ bind(&done);
+  X87CommitWrite(res_reg);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+  Register input_reg = ToRegister(instr->value());
+
+  // The input was optimistically untagged; revert it.
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
+  if (instr->truncating()) {
+    Label no_heap_number, check_bools, check_false;
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ TruncateHeapNumberToI(input_reg, input_reg);
+    __ jmp(done);
+
+    __ bind(&no_heap_number);
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ cmp(input_reg, factory()->undefined_value());
+    __ j(not_equal, &check_bools, Label::kNear);
+    __ Move(input_reg, Immediate(0));
+    __ jmp(done);
+
+    __ bind(&check_bools);
+    __ cmp(input_reg, factory()->true_value());
+    __ j(not_equal, &check_false, Label::kNear);
+    __ Move(input_reg, Immediate(1));
+    __ jmp(done);
+
+    __ bind(&check_false);
+    __ cmp(input_reg, factory()->false_value());
+    DeoptimizeIf(not_equal, instr,
+                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ Move(input_reg, Immediate(0));
+  } else {
+    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+    // should first try a fast conversion and then bailout to this slow case.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           isolate()->factory()->heap_number_map());
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+
+    __ sub(esp, Immediate(kPointerSize));
+    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      Label no_precision_lost, not_nan, zero_check;
+      __ fld(0);
+
+      __ fist_s(MemOperand(esp, 0));
+      __ fild_s(MemOperand(esp, 0));
+      __ FCmp();
+      __ pop(input_reg);
+
+      __ j(equal, &no_precision_lost, Label::kNear);
+      __ fstp(0);
+      DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+      __ bind(&no_precision_lost);
+
+      __ j(parity_odd, &not_nan);
+      __ fstp(0);
+      DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+      __ bind(&not_nan);
+
+      __ test(input_reg, Operand(input_reg));
+      __ j(zero, &zero_check, Label::kNear);
+      __ fstp(0);
+      __ jmp(done);
+
+      __ bind(&zero_check);
+      // To check for minus zero, we load the value again as float, and check
+      // if that is still 0.
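+      // A single-precision store preserves the sign bit: -0.0 comes back as
+      // 0x80000000, so a plain integer test distinguishes -0 from +0.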
+ __ sub(esp, Immediate(kPointerSize)); + __ fstp_s(Operand(esp, 0)); + __ pop(input_reg); + __ test(input_reg, Operand(input_reg)); + DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); + } else { + __ fist_s(MemOperand(esp, 0)); + __ fild_s(MemOperand(esp, 0)); + __ FCmp(); + __ pop(input_reg); + DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); + DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); + } + } +} + + +void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI final : public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, + LTaggedToI* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); } + LInstruction* instr() override { return instr_; } + + private: + LTaggedToI* instr_; + }; + + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + Register input_reg = ToRegister(input); + DCHECK(input_reg.is(ToRegister(instr->result()))); + + if (instr->hydrogen()->value()->representation().IsSmi()) { + __ SmiUntag(input_reg); + } else { + DeferredTaggedToI* deferred = + new(zone()) DeferredTaggedToI(this, instr, x87_stack_); + // Optimistically untag the input. + // If the input is a HeapObject, SmiUntag will set the carry flag. + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ SmiUntag(input_reg); + // Branch to deferred code if the input was tagged. + // The deferred code will take care of restoring the tag. + __ j(carry, deferred->entry()); + __ bind(deferred->exit()); + } +} + + +void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + LOperand* temp = instr->temp(); + DCHECK(temp->IsRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsDoubleRegister()); + + Register input_reg = ToRegister(input); + Register temp_reg = ToRegister(temp); + + HValue* value = instr->hydrogen()->value(); + NumberUntagDMode mode = value->representation().IsSmi() + ? 
NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; + + EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result), + mode); +} + + +void LCodeGen::DoDoubleToI(LDoubleToI* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsDoubleRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsRegister()); + Register result_reg = ToRegister(result); + + if (instr->truncating()) { + X87Register input_reg = ToX87Register(input); + X87Fxch(input_reg); + __ TruncateX87TOSToI(result_reg); + } else { + Label lost_precision, is_nan, minus_zero, done; + X87Register input_reg = ToX87Register(input); + X87Fxch(input_reg); + __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), + &lost_precision, &is_nan, &minus_zero); + __ jmp(&done); + __ bind(&lost_precision); + DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); + __ bind(&is_nan); + DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); + __ bind(&minus_zero); + DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); + __ bind(&done); + } +} + + +void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { + LOperand* input = instr->value(); + DCHECK(input->IsDoubleRegister()); + LOperand* result = instr->result(); + DCHECK(result->IsRegister()); + Register result_reg = ToRegister(result); + + Label lost_precision, is_nan, minus_zero, done; + X87Register input_reg = ToX87Register(input); + X87Fxch(input_reg); + __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), + &lost_precision, &is_nan, &minus_zero); + __ jmp(&done); + __ bind(&lost_precision); + DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); + __ bind(&is_nan); + DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); + __ bind(&minus_zero); + DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); + __ bind(&done); + __ SmiTag(result_reg); + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); +} + + +void LCodeGen::DoCheckSmi(LCheckSmi* instr) { + LOperand* input = instr->value(); + __ test(ToOperand(input), Immediate(kSmiTagMask)); + DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi); +} + + +void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { + if (!instr->hydrogen()->value()->type().IsHeapObject()) { + LOperand* input = instr->value(); + __ test(ToOperand(input), Immediate(kSmiTagMask)); + DeoptimizeIf(zero, instr, Deoptimizer::kSmi); + } +} + + +void LCodeGen::DoCheckArrayBufferNotNeutered( + LCheckArrayBufferNotNeutered* instr) { + Register view = ToRegister(instr->view()); + Register scratch = ToRegister(instr->scratch()); + + __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset)); + __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset), + 1 << JSArrayBuffer::WasNeutered::kShift); + DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds); +} + + +void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); + + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + + if (instr->hydrogen()->is_interval_check()) { + InstanceType first; + InstanceType last; + instr->hydrogen()->GetCheckInterval(&first, &last); + + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), + static_cast<int8_t>(first)); + + // If there is only one type in the interval check for equality. + if (first == last) { + DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); + } else { + DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType); + // Omit check for the last type. 
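+      // If the interval is closed by LAST_TYPE, no instance type can exceed
+      // the upper bound, so that compare can be skipped entirely.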
+ if (last != LAST_TYPE) { + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), + static_cast<int8_t>(last)); + DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType); + } + } + } else { + uint8_t mask; + uint8_t tag; + instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); + + if (base::bits::IsPowerOfTwo32(mask)) { + DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); + __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask); + DeoptimizeIf(tag == 0 ? not_zero : zero, instr, + Deoptimizer::kWrongInstanceType); + } else { + __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ and_(temp, mask); + __ cmp(temp, tag); + DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); + } + } +} + + +void LCodeGen::DoCheckValue(LCheckValue* instr) { + Handle<HeapObject> object = instr->hydrogen()->object().handle(); + if (instr->hydrogen()->object_in_new_space()) { + Register reg = ToRegister(instr->value()); + Handle<Cell> cell = isolate()->factory()->NewCell(object); + __ cmp(reg, Operand::ForCell(cell)); + } else { + Operand operand = ToOperand(instr->value()); + __ cmp(operand, object); + } + DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch); +} + + +void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { + { + PushSafepointRegistersScope scope(this); + __ push(object); + __ xor_(esi, esi); + __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); + RecordSafepointWithRegisters( + instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); + + __ test(eax, Immediate(kSmiTagMask)); + } + DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed); +} + + +void LCodeGen::DoCheckMaps(LCheckMaps* instr) { + class DeferredCheckMaps final : public LDeferredCode { + public: + DeferredCheckMaps(LCodeGen* codegen, + LCheckMaps* instr, + Register object, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { + SetExit(check_maps()); + } + void Generate() override { + codegen()->DoDeferredInstanceMigration(instr_, object_); + } + Label* check_maps() { return &check_maps_; } + LInstruction* instr() override { return instr_; } + + private: + LCheckMaps* instr_; + Label check_maps_; + Register object_; + }; + + if (instr->hydrogen()->IsStabilityCheck()) { + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + for (int i = 0; i < maps->size(); ++i) { + AddStabilityDependency(maps->at(i).handle()); + } + return; + } + + LOperand* input = instr->value(); + DCHECK(input->IsRegister()); + Register reg = ToRegister(input); + + DeferredCheckMaps* deferred = NULL; + if (instr->hydrogen()->HasMigrationTarget()) { + deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); + __ bind(deferred->check_maps()); + } + + const UniqueSet<Map>* maps = instr->hydrogen()->maps(); + Label success; + for (int i = 0; i < maps->size() - 1; i++) { + Handle<Map> map = maps->at(i).handle(); + __ CompareMap(reg, map); + __ j(equal, &success, Label::kNear); + } + + Handle<Map> map = maps->at(maps->size() - 1).handle(); + __ CompareMap(reg, map); + if (instr->hydrogen()->HasMigrationTarget()) { + __ j(not_equal, deferred->entry()); + } else { + DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); + } + + __ bind(&success); +} + + +void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { + X87Register value_reg = ToX87Register(instr->unclamped()); + Register result_reg = ToRegister(instr->result()); + X87Fxch(value_reg); + __ ClampTOSToUint8(result_reg); +} + + +void LCodeGen::DoClampIToUint8(LClampIToUint8* 
instr) {
+ DCHECK(instr->unclamped()->Equals(instr->result()));
+ Register value_reg = ToRegister(instr->result());
+ __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->scratch());
+ Register scratch2 = ToRegister(instr->scratch2());
+ Register scratch3 = ToRegister(instr->scratch3());
+ Label is_smi, done, heap_number, valid_exponent,
+ largest_value, zero_result, maybe_nan_or_infinity;
+
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for a heap number.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ __ jmp(&zero_result, Label::kNear);
+
+ // Heap number.
+ __ bind(&heap_number);
+
+ // Surprisingly, all of the hand-crafted bit manipulation below is much
+ // faster than the x86 FPU built-in instruction, especially since "banker's
+ // rounding" would additionally be very expensive.
+
+ // Get the exponent word.
+ __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+
+ // Test for negative values --> clamp to zero.
+ __ test(scratch, scratch);
+ __ j(negative, &zero_result, Label::kNear);
+
+ // Get the exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ j(zero, &zero_result, Label::kNear);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
+ __ j(negative, &zero_result, Label::kNear);
+
+ const uint32_t non_int8_exponent = 7;
+ __ cmp(scratch2, Immediate(non_int8_exponent + 1));
+ // If the exponent is too big, check for special values.
+ __ j(greater, &maybe_nan_or_infinity, Label::kNear);
+
+ __ bind(&valid_exponent);
+ // Exponent word in scratch, exponent in scratch2. We know that
+ // 0 <= exponent < 7. The shift bias is the number of bits to shift the
+ // mantissa such that, with an exponent of 7, the top-most one ends up in
+ // bit 30, allowing detection of the rounding overflow from 255.5 to 256
+ // (bit 31 goes from 0 to 1).
+ int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
+ __ lea(result_reg, MemOperand(scratch2, shift_bias));
+ // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
+ // top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1 of the mantissa.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up to round.
+ __ shl_cl(scratch);
+ // Use "banker's rounding" as the spec requires: if the fractional part of
+ // the number is exactly 0.5, take the bit in the "ones" place and add it to
+ // the "halves" place, which has the effect of rounding to even.
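+ //
+ // Editor's sketch (not part of the original patch): the clamp implemented
+ // by this function is equivalent to the following portable C++, where
+ // ClampDoubleToUint8 is a hypothetical helper name rather than V8 API:
+ //
+ //   static uint8_t ClampDoubleToUint8(double value) {
+ //     if (!(value > 0.0)) return 0;    // NaN and negatives clamp to 0.
+ //     if (value >= 255.0) return 255;  // Large values clamp to 255.
+ //     // std::nearbyint (<cmath>) rounds ties to even under the default
+ //     // FE_TONEAREST mode: 2.5 -> 2, but 3.5 -> 4.
+ //     return static_cast<uint8_t>(std::nearbyint(value));
+ //   }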
+ __ mov(scratch2, scratch);
+ const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
+ const uint32_t one_bit_shift = one_half_bit_shift + 1;
+ __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
+ __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
+ Label no_round;
+ __ j(less, &no_round, Label::kNear);
+ Label round_up;
+ __ mov(scratch2, Immediate(1 << one_half_bit_shift));
+ __ j(greater, &round_up, Label::kNear);
+ __ test(scratch3, scratch3);
+ __ j(not_zero, &round_up, Label::kNear);
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, Immediate(1 << one_bit_shift));
+ __ shr(scratch2, 1);
+ __ bind(&round_up);
+ __ add(scratch, scratch2);
+ __ j(overflow, &largest_value, Label::kNear);
+ __ bind(&no_round);
+ __ shr(scratch, 23);
+ __ mov(result_reg, scratch);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&maybe_nan_or_infinity);
+ // Check for NaN/Infinity; all other values map to 255.
+ __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
+ __ j(not_equal, &largest_value, Label::kNear);
+
+ // Check for NaN, which differs from Infinity in that at least one mantissa
+ // bit is set.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ j(not_zero, &zero_result, Label::kNear); // M != 0 --> NaN.
+ // Infinity -> fall through to map to 255.
+
+ __ bind(&largest_value);
+ __ mov(result_reg, Immediate(255));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&zero_result);
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+
+ // Smi case.
+ __ bind(&is_smi);
+ if (!input_reg.is(result_reg)) {
+ __ mov(result_reg, input_reg);
+ }
+ __ SmiUntag(result_reg);
+ __ ClampUint8(result_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ X87Register value_reg = ToX87Register(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ X87Fxch(value_reg);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(Operand(esp, 0));
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ mov(result_reg, Operand(esp, kPointerSize));
+ } else {
+ __ mov(result_reg, Operand(esp, 0));
+ }
+ __ add(esp, Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ X87Register result_reg = ToX87Register(instr->result());
+ // Follow the pattern below to write an x87 FP register.
+ X87PrepareToWrite(result_reg);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(Operand(esp, 0), lo_reg);
+ __ mov(Operand(esp, kPointerSize), hi_reg);
+ __ fld_d(Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ X87CommitWrite(result_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate final : public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen,
+ LAllocate* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+ LInstruction* instr() override { return instr_; }
+
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr, x87_stack_);
+
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->hydrogen()->IsOldSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = static_cast<AllocationFlags>(flags | PRETENURE); + } + + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + CHECK(size <= Page::kMaxRegularHeapObjectSize); + __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); + } else { + Register size = ToRegister(instr->size()); + __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); + } + + __ bind(deferred->exit()); + + if (instr->hydrogen()->MustPrefillWithFiller()) { + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + __ mov(temp, (size / kPointerSize) - 1); + } else { + temp = ToRegister(instr->size()); + __ shr(temp, kPointerSizeLog2); + __ dec(temp); + } + Label loop; + __ bind(&loop); + __ mov(FieldOperand(result, temp, times_pointer_size, 0), + isolate()->factory()->one_pointer_filler_map()); + __ dec(temp); + __ j(not_zero, &loop); + } +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ Move(result, Immediate(Smi::FromInt(0))); + + PushSafepointRegistersScope scope(this); + if (instr->size()->IsRegister()) { + Register size = ToRegister(instr->size()); + DCHECK(!size.is(result)); + __ SmiTag(ToRegister(instr->size())); + __ push(size); + } else { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (size >= 0 && size <= Smi::kMaxValue) { + __ push(Immediate(Smi::FromInt(size))); + } else { + // We should never get here at runtime => abort + __ int3(); + return; + } + } + + int flags = AllocateDoubleAlignFlag::encode( + instr->hydrogen()->MustAllocateDoubleAligned()); + if (instr->hydrogen()->IsOldSpaceAllocation()) { + DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); + flags = AllocateTargetSpace::update(flags, OLD_SPACE); + } else { + flags = AllocateTargetSpace::update(flags, NEW_SPACE); + } + __ push(Immediate(Smi::FromInt(flags))); + + CallRuntimeFromDeferred( + Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); + __ StoreToSafepointRegisterSlot(result, eax); +} + + +void LCodeGen::DoToFastProperties(LToFastProperties* instr) { + DCHECK(ToRegister(instr->value()).is(eax)); + __ push(eax); + CallRuntime(Runtime::kToFastProperties, 1, instr); +} + + +void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + Label materialized; + // Registers will be used as follows: + // ecx = literals array. + // ebx = regexp literal. + // eax = regexp literal clone. + // esi = context. + int literal_offset = + LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index()); + __ LoadHeapObject(ecx, instr->hydrogen()->literals()); + __ mov(ebx, FieldOperand(ecx, literal_offset)); + __ cmp(ebx, factory()->undefined_value()); + __ j(not_equal, &materialized, Label::kNear); + + // Create regexp literal using runtime function + // Result will be in eax. 
+ __ push(ecx); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ push(Immediate(instr->hydrogen()->pattern())); + __ push(Immediate(instr->hydrogen()->flags())); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); + __ mov(ebx, eax); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated, Label::kNear); + + __ bind(&runtime_allocate); + __ push(ebx); + __ push(Immediate(Smi::FromInt(size))); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); + __ pop(ebx); + + __ bind(&allocated); + // Copy the content into the newly allocated memory. + // (Unroll copy loop once for better throughput). + for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { + __ mov(edx, FieldOperand(ebx, i)); + __ mov(ecx, FieldOperand(ebx, i + kPointerSize)); + __ mov(FieldOperand(eax, i), edx); + __ mov(FieldOperand(eax, i + kPointerSize), ecx); + } + if ((size % (2 * kPointerSize)) != 0) { + __ mov(edx, FieldOperand(ebx, size - kPointerSize)); + __ mov(FieldOperand(eax, size - kPointerSize), edx); + } +} + + +void LCodeGen::DoTypeof(LTypeof* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + DCHECK(ToRegister(instr->value()).is(ebx)); + Label end, do_call; + Register value_register = ToRegister(instr->value()); + __ JumpIfNotSmi(value_register, &do_call); + __ mov(eax, Immediate(isolate()->factory()->number_string())); + __ jmp(&end); + __ bind(&do_call); + TypeofStub stub(isolate()); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ bind(&end); +} + + +void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { + Register input = ToRegister(instr->value()); + Condition final_branch_condition = EmitTypeofIs(instr, input); + if (final_branch_condition != no_condition) { + EmitBranch(instr, final_branch_condition); + } +} + + +Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) { + Label* true_label = instr->TrueLabel(chunk_); + Label* false_label = instr->FalseLabel(chunk_); + Handle<String> type_name = instr->type_literal(); + int left_block = instr->TrueDestination(chunk_); + int right_block = instr->FalseDestination(chunk_); + int next_block = GetNextEmittedBlock(); + + Label::Distance true_distance = left_block == next_block ? Label::kNear + : Label::kFar; + Label::Distance false_distance = right_block == next_block ? 
Label::kNear + : Label::kFar; + Condition final_branch_condition = no_condition; + if (String::Equals(type_name, factory()->number_string())) { + __ JumpIfSmi(input, true_label, true_distance); + __ cmp(FieldOperand(input, HeapObject::kMapOffset), + factory()->heap_number_map()); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->string_string())) { + __ JumpIfSmi(input, false_label, false_distance); + __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); + final_branch_condition = below; + + } else if (String::Equals(type_name, factory()->symbol_string())) { + __ JumpIfSmi(input, false_label, false_distance); + __ CmpObjectType(input, SYMBOL_TYPE, input); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->boolean_string())) { + __ cmp(input, factory()->true_value()); + __ j(equal, true_label, true_distance); + __ cmp(input, factory()->false_value()); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->undefined_string())) { + __ cmp(input, factory()->undefined_value()); + __ j(equal, true_label, true_distance); + __ JumpIfSmi(input, false_label, false_distance); + // Check for undetectable objects => true. + __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); + __ test_b(FieldOperand(input, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + final_branch_condition = not_zero; + + } else if (String::Equals(type_name, factory()->function_string())) { + __ JumpIfSmi(input, false_label, false_distance); + // Check for callable and not undetectable objects => true. + __ mov(input, FieldOperand(input, HeapObject::kMapOffset)); + __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset)); + __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)); + __ cmp(input, 1 << Map::kIsCallable); + final_branch_condition = equal; + + } else if (String::Equals(type_name, factory()->object_string())) { + __ JumpIfSmi(input, false_label, false_distance); + __ cmp(input, factory()->null_value()); + __ j(equal, true_label, true_distance); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input); + __ j(below, false_label, false_distance); + // Check for callable or undetectable objects => false. + __ test_b(FieldOperand(input, Map::kBitFieldOffset), + (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)); + final_branch_condition = zero; + +// clang-format off +#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \ + } else if (String::Equals(type_name, factory()->type##_string())) { \ + __ JumpIfSmi(input, false_label, false_distance); \ + __ cmp(FieldOperand(input, HeapObject::kMapOffset), \ + factory()->type##_map()); \ + final_branch_condition = equal; + SIMD128_TYPES(SIMD128_TYPE) +#undef SIMD128_TYPE + // clang-format on + + } else { + __ jmp(false_label, false_distance); + } + return final_branch_condition; +} + + +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { + Register temp = ToRegister(instr->temp()); + + EmitIsConstructCall(temp); + EmitBranch(instr, equal); +} + + +void LCodeGen::EmitIsConstructCall(Register temp) { + // Get the frame pointer for the calling frame. + __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. 
+ Label check_frame_marker; + __ cmp(Operand(temp, StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &check_frame_marker, Label::kNear); + __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); +} + + +void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { + if (info()->ShouldEnsureSpaceForLazyDeopt()) { + // Ensure that we have enough space after the previous lazy-bailout + // instruction for patching the code here. + int current_pc = masm()->pc_offset(); + if (current_pc < last_lazy_deopt_pc_ + space_needed) { + int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; + __ Nop(padding_size); + } + } + last_lazy_deopt_pc_ = masm()->pc_offset(); +} + + +void LCodeGen::DoLazyBailout(LLazyBailout* instr) { + last_lazy_deopt_pc_ = masm()->pc_offset(); + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); +} + + +void LCodeGen::DoDeoptimize(LDeoptimize* instr) { + Deoptimizer::BailoutType type = instr->hydrogen()->type(); + // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the + // needed return address), even though the implementation of LAZY and EAGER is + // now identical. When LAZY is eventually completely folded into EAGER, remove + // the special case below. + if (info()->IsStub() && type == Deoptimizer::EAGER) { + type = Deoptimizer::LAZY; + } + DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type); +} + + +void LCodeGen::DoDummy(LDummy* instr) { + // Nothing to see here, move on! +} + + +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! +} + + +void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { + PushSafepointRegistersScope scope(this); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kStackGuard); + RecordSafepointWithLazyDeopt( + instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); +} + + +void LCodeGen::DoStackCheck(LStackCheck* instr) { + class DeferredStackCheck final : public LDeferredCode { + public: + DeferredStackCheck(LCodeGen* codegen, + LStackCheck* instr, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), instr_(instr) { } + void Generate() override { codegen()->DoDeferredStackCheck(instr_); } + LInstruction* instr() override { return instr_; } + + private: + LStackCheck* instr_; + }; + + DCHECK(instr->HasEnvironment()); + LEnvironment* env = instr->environment(); + // There is no LLazyBailout instruction for stack-checks. We have to + // prepare for lazy deoptimization explicitly here. + if (instr->hydrogen()->is_function_entry()) { + // Perform stack overflow check. 
+ Label done; + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &done, Label::kNear); + + DCHECK(instr->context()->IsRegister()); + DCHECK(ToRegister(instr->context()).is(esi)); + CallCode(isolate()->builtins()->StackCheck(), + RelocInfo::CODE_TARGET, + instr); + __ bind(&done); + } else { + DCHECK(instr->hydrogen()->is_backwards_branch()); + // Perform stack overflow check if this goto needs it before jumping. + DeferredStackCheck* deferred_stack_check = + new(zone()) DeferredStackCheck(this, instr, x87_stack_); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(below, deferred_stack_check->entry()); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + __ bind(instr->done_label()); + deferred_stack_check->SetExit(instr->done_label()); + RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); + // Don't record a deoptimization index for the safepoint here. + // This will be done explicitly when emitting call and the safepoint in + // the deferred code. + } +} + + +void LCodeGen::DoOsrEntry(LOsrEntry* instr) { + // This is a pseudo-instruction that ensures that the environment here is + // properly registered for deoptimization and records the assembler's PC + // offset. + LEnvironment* environment = instr->environment(); + + // If the environment were already registered, we would have no way of + // backpatching it with the spill slot operands. + DCHECK(!environment->HasBeenRegistered()); + RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); + + GenerateOsrPrologue(); +} + + +void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { + DCHECK(ToRegister(instr->context()).is(esi)); + __ test(eax, Immediate(kSmiTagMask)); + DeoptimizeIf(zero, instr, Deoptimizer::kSmi); + + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); + DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType); + + Label use_cache, call_runtime; + __ CheckEnumCache(&call_runtime); + + __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); + __ jmp(&use_cache, Label::kNear); + + // Get the set of properties to enumerate. 
+ __ bind(&call_runtime); + __ push(eax); + CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); + + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->meta_map()); + DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); + __ bind(&use_cache); +} + + +void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { + Register map = ToRegister(instr->map()); + Register result = ToRegister(instr->result()); + Label load_cache, done; + __ EnumLength(result, map); + __ cmp(result, Immediate(Smi::FromInt(0))); + __ j(not_equal, &load_cache, Label::kNear); + __ mov(result, isolate()->factory()->empty_fixed_array()); + __ jmp(&done, Label::kNear); + + __ bind(&load_cache); + __ LoadInstanceDescriptors(map, result); + __ mov(result, + FieldOperand(result, DescriptorArray::kEnumCacheOffset)); + __ mov(result, + FieldOperand(result, FixedArray::SizeFor(instr->idx()))); + __ bind(&done); + __ test(result, result); + DeoptimizeIf(equal, instr, Deoptimizer::kNoCache); +} + + +void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { + Register object = ToRegister(instr->value()); + __ cmp(ToRegister(instr->map()), + FieldOperand(object, HeapObject::kMapOffset)); + DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); +} + + +void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, + Register object, + Register index) { + PushSafepointRegistersScope scope(this); + __ push(object); + __ push(index); + __ xor_(esi, esi); + __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); + __ StoreToSafepointRegisterSlot(object, eax); +} + + +void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + class DeferredLoadMutableDouble final : public LDeferredCode { + public: + DeferredLoadMutableDouble(LCodeGen* codegen, + LLoadFieldByIndex* instr, + Register object, + Register index, + const X87Stack& x87_stack) + : LDeferredCode(codegen, x87_stack), + instr_(instr), + object_(object), + index_(index) { + } + void Generate() override { + codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); + } + LInstruction* instr() override { return instr_; } + + private: + LLoadFieldByIndex* instr_; + Register object_; + Register index_; + }; + + Register object = ToRegister(instr->object()); + Register index = ToRegister(instr->index()); + + DeferredLoadMutableDouble* deferred; + deferred = new(zone()) DeferredLoadMutableDouble( + this, instr, object, index, x87_stack_); + + Label out_of_object, done; + __ test(index, Immediate(Smi::FromInt(1))); + __ j(not_zero, deferred->entry()); + + __ sar(index, 1); + + __ cmp(index, Immediate(0)); + __ j(less, &out_of_object, Label::kNear); + __ mov(object, FieldOperand(object, + index, + times_half_pointer_size, + JSObject::kHeaderSize)); + __ jmp(&done, Label::kNear); + + __ bind(&out_of_object); + __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset)); + __ neg(index); + // Index is now equal to out of object property index plus 1. 
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+ Register context = ToRegister(instr->context());
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+ Handle<ScopeInfo> scope_info = instr->scope_info();
+ __ Push(scope_info);
+ __ push(ToRegister(instr->function()));
+ CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_X87
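
Editor's note: DoDoubleBits and DoConstructDouble above spill a double to the
stack in order to read or write its 32-bit halves. As a reference for readers,
here is a minimal stand-alone C++ sketch of the same word-level view of an
IEEE-754 double; the helper names are hypothetical and for illustration only,
not V8 API:

#include <cstdint>
#include <cstring>

// High word: sign, exponent and top mantissa bits -- the word DoDoubleBits
// reads from Operand(esp, kPointerSize) on the little-endian ia32 target.
static uint32_t DoubleHighWord(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // Type-pun without UB.
  return static_cast<uint32_t>(bits >> 32);
}

// Low word: the remaining mantissa bits, read from Operand(esp, 0).
static uint32_t DoubleLowWord(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits);
}

// Rebuild a double from its halves, mirroring DoConstructDouble's stores of
// lo at esp+0 and hi at esp+kPointerSize before the fld_d.
static double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}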
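Likewise, the literal copy in DoRegExpLiteral is unrolled by a factor of two
with a one-word tail. A sketch of the equivalent copy in portable C++ (the
function name is hypothetical; size is in bytes and, as for JSRegExp objects,
a multiple of the 4-byte pointer size):

#include <cstddef>
#include <cstdint>

static void CopyWordsUnrolledByTwo(uint32_t* dst, const uint32_t* src,
                                   size_t size) {
  size_t i = 0;
  // Two words per iteration: read both, then write both, as the generated
  // code does with the edx/ecx register pair.
  for (; i + 2 * sizeof(uint32_t) <= size; i += 2 * sizeof(uint32_t)) {
    uint32_t a = src[i / 4];
    uint32_t b = src[i / 4 + 1];
    dst[i / 4] = a;
    dst[i / 4 + 1] = b;
  }
  // Odd word count: copy the single trailing word.
  if (i < size) dst[i / 4] = src[i / 4];
}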