Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r-- | deps/v8/src/ia32/assembler-ia32-inl.h | 26
-rw-r--r-- | deps/v8/src/ia32/assembler-ia32.cc | 87
-rw-r--r-- | deps/v8/src/ia32/assembler-ia32.h | 88
-rw-r--r-- | deps/v8/src/ia32/builtins-ia32.cc | 1031
-rw-r--r-- | deps/v8/src/ia32/code-stubs-ia32.cc | 1126
-rw-r--r-- | deps/v8/src/ia32/code-stubs-ia32.h | 291
-rw-r--r-- | deps/v8/src/ia32/codegen-ia32.cc | 46
-rw-r--r-- | deps/v8/src/ia32/debug-ia32.cc | 95
-rw-r--r-- | deps/v8/src/ia32/deoptimizer-ia32.cc | 100
-rw-r--r-- | deps/v8/src/ia32/disasm-ia32.cc | 29
-rw-r--r-- | deps/v8/src/ia32/full-codegen-ia32.cc | 361
-rw-r--r-- | deps/v8/src/ia32/ic-ia32.cc | 160
-rw-r--r-- | deps/v8/src/ia32/lithium-codegen-ia32.cc | 255
-rw-r--r-- | deps/v8/src/ia32/lithium-codegen-ia32.h | 13
-rw-r--r-- | deps/v8/src/ia32/lithium-ia32.cc | 64
-rw-r--r-- | deps/v8/src/ia32/lithium-ia32.h | 28
-rw-r--r-- | deps/v8/src/ia32/macro-assembler-ia32.cc | 737
-rw-r--r-- | deps/v8/src/ia32/macro-assembler-ia32.h | 226
-rw-r--r-- | deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 116
-rw-r--r-- | deps/v8/src/ia32/stub-cache-ia32.cc | 401
20 files changed, 3561 insertions, 1719 deletions
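
A recurring pattern in the diff below is the addition of incremental-marking write barriers to the ia32 RelocInfo setters: the raw store into the instruction stream happens first, and only then is host()->GetHeap()->incremental_marking()->RecordWrite(...) (or RecordWriteIntoCode for code targets) told about the new reference. A minimal sketch of that ordering, using simplified stand-in types rather than the real V8 classes (HeapObject, IncrementalMarking and the slot handling here are illustrative assumptions, not V8's actual API):

#include <cstdio>
#include <unordered_set>

// Simplified stand-ins for the real V8 types (illustrative only).
struct HeapObject {
  const char* name;
  HeapObject* field = nullptr;   // one pointer slot, e.g. an embedded object pointer
};

class IncrementalMarking {
 public:
  void Start() { active_ = true; }
  // Called after a pointer slot inside 'host' has been updated to 'value'.
  void RecordWrite(HeapObject* host, HeapObject** slot, HeapObject* value) {
    if (!active_) return;              // no marking in progress: barrier is a no-op
    worklist_.insert(value);           // make sure the freshly written target gets marked
    std::printf("barrier: %s -> %s (slot %p)\n",
                host->name, value->name, static_cast<void*>(slot));
  }
 private:
  bool active_ = false;
  std::unordered_set<HeapObject*> worklist_;
};

int main() {
  IncrementalMarking marking;
  marking.Start();

  HeapObject code{"code-object"};
  HeapObject target{"embedded-object"};

  // Same ordering as the RelocInfo setters in the diff:
  code.field = &target;                              // 1. perform the raw store
  marking.RecordWrite(&code, &code.field, &target);  // 2. then notify the marker
  return 0;
}

The hunks shown below consistently perform the store (and, where needed, the instruction-cache flush) before notifying the marker, matching the order in this sketch.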
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 0ca2d6b4a8..446aa3e2de 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -89,8 +89,13 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); + ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + if (host() != NULL && IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -116,6 +121,10 @@ void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Memory::Object_at(pc_) = target; CPU::FlushICache(pc_, sizeof(Address)); + if (host() != NULL && target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } } @@ -147,6 +156,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; CPU::FlushICache(pc_, sizeof(Address)); + if (host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } } @@ -161,6 +176,11 @@ void RelocInfo::set_call_address(Address target) { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Assembler::set_target_address_at(pc_ + 1, target); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -194,7 +214,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitPointer(target_object_address()); + visitor->VisitEmbeddedPointer(this); CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); @@ -222,7 +242,7 @@ template<typename StaticVisitor> void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitEmbeddedPointer(heap, this); CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 999647487e..66a98841a2 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -55,6 +55,8 @@ uint64_t CpuFeatures::supported_ = 0; uint64_t CpuFeatures::found_by_runtime_probing_ = 0; +// The Probe method needs executable memory, so it uses Heap::CreateCode. +// Allocation failure is silent and leads to safe default. void CpuFeatures::Probe() { ASSERT(!initialized_); ASSERT(supported_ == 0); @@ -86,23 +88,23 @@ void CpuFeatures::Probe() { __ pushfd(); __ push(ecx); __ push(ebx); - __ mov(ebp, Operand(esp)); + __ mov(ebp, esp); // If we can modify bit 21 of the EFLAGS register, then CPUID is supported. 
__ pushfd(); __ pop(eax); - __ mov(edx, Operand(eax)); + __ mov(edx, eax); __ xor_(eax, 0x200000); // Flip bit 21. __ push(eax); __ popfd(); __ pushfd(); __ pop(eax); - __ xor_(eax, Operand(edx)); // Different if CPUID is supported. + __ xor_(eax, edx); // Different if CPUID is supported. __ j(not_zero, &cpuid); // CPUID not supported. Clear the supported features in edx:eax. - __ xor_(eax, Operand(eax)); - __ xor_(edx, Operand(edx)); + __ xor_(eax, eax); + __ xor_(edx, edx); __ jmp(&done); // Invoke CPUID with 1 in eax to get feature information in @@ -118,13 +120,13 @@ void CpuFeatures::Probe() { // Move the result from ecx:edx to edx:eax and make sure to mark the // CPUID feature as supported. - __ mov(eax, Operand(edx)); + __ mov(eax, edx); __ or_(eax, 1 << CPUID); - __ mov(edx, Operand(ecx)); + __ mov(edx, ecx); // Done. __ bind(&done); - __ mov(esp, Operand(ebp)); + __ mov(esp, ebp); __ pop(ebx); __ pop(ecx); __ popfd(); @@ -286,6 +288,18 @@ bool Operand::is_reg(Register reg) const { && ((buf_[0] & 0x07) == reg.code()); // register codes match. } + +bool Operand::is_reg_only() const { + return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only. +} + + +Register Operand::reg() const { + ASSERT(is_reg_only()); + return Register::from_code(buf_[0] & 0x07); +} + + // ----------------------------------------------------------------------------- // Implementation of Assembler. @@ -701,6 +715,13 @@ void Assembler::add(Register dst, const Operand& src) { } +void Assembler::add(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x01); + emit_operand(src, dst); +} + + void Assembler::add(const Operand& dst, const Immediate& x) { ASSERT(reloc_info_writer.last_pc() != NULL); EnsureSpace ensure_space(this); @@ -741,25 +762,29 @@ void Assembler::and_(const Operand& dst, Register src) { void Assembler::cmpb(const Operand& op, int8_t imm8) { EnsureSpace ensure_space(this); - EMIT(0x80); - emit_operand(edi, op); // edi == 7 + if (op.is_reg(eax)) { + EMIT(0x3C); + } else { + EMIT(0x80); + emit_operand(edi, op); // edi == 7 + } EMIT(imm8); } -void Assembler::cmpb(const Operand& dst, Register src) { - ASSERT(src.is_byte_register()); +void Assembler::cmpb(const Operand& op, Register reg) { + ASSERT(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x38); - emit_operand(src, dst); + emit_operand(reg, op); } -void Assembler::cmpb(Register dst, const Operand& src) { - ASSERT(dst.is_byte_register()); +void Assembler::cmpb(Register reg, const Operand& op) { + ASSERT(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x3A); - emit_operand(dst, src); + emit_operand(reg, op); } @@ -1069,18 +1094,6 @@ void Assembler::shr_cl(Register dst) { } -void Assembler::subb(const Operand& op, int8_t imm8) { - EnsureSpace ensure_space(this); - if (op.is_reg(eax)) { - EMIT(0x2c); - } else { - EMIT(0x80); - emit_operand(ebp, op); // ebp == 5 - } - EMIT(imm8); -} - - void Assembler::sub(const Operand& dst, const Immediate& x) { EnsureSpace ensure_space(this); emit_arith(5, dst, x); @@ -1094,14 +1107,6 @@ void Assembler::sub(Register dst, const Operand& src) { } -void Assembler::subb(Register dst, const Operand& src) { - ASSERT(dst.code() < 4); - EnsureSpace ensure_space(this); - EMIT(0x2A); - emit_operand(dst, src); -} - - void Assembler::sub(const Operand& dst, Register src) { EnsureSpace ensure_space(this); EMIT(0x29); @@ -1158,6 +1163,10 @@ void Assembler::test(const Operand& op, const Immediate& imm) { void Assembler::test_b(const Operand& op, uint8_t imm8) { + if 
(op.is_reg_only() && op.reg().code() >= 4) { + test(op, Immediate(imm8)); + return; + } EnsureSpace ensure_space(this); EMIT(0xF6); emit_operand(eax, op); @@ -1178,10 +1187,10 @@ void Assembler::xor_(Register dst, const Operand& src) { } -void Assembler::xor_(const Operand& src, Register dst) { +void Assembler::xor_(const Operand& dst, Register src) { EnsureSpace ensure_space(this); EMIT(0x31); - emit_operand(dst, src); + emit_operand(src, dst); } @@ -2471,7 +2480,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { return; } } - RelocInfo rinfo(pc_, rmode, data); + RelocInfo rinfo(pc_, rmode, data, NULL); reloc_info_writer.Write(&rinfo); } diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 4698e3ed1b..4dfde5f62f 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -75,6 +75,8 @@ struct Register { static inline Register FromAllocationIndex(int index); static Register from_code(int code) { + ASSERT(code >= 0); + ASSERT(code < kNumRegisters); Register r = { code }; return r; } @@ -300,9 +302,6 @@ enum ScaleFactor { class Operand BASE_EMBEDDED { public: - // reg - INLINE(explicit Operand(Register reg)); - // XMM reg INLINE(explicit Operand(XMMRegister xmm_reg)); @@ -347,12 +346,16 @@ class Operand BASE_EMBEDDED { // Returns true if this Operand is a wrapper for the specified register. bool is_reg(Register reg) const; + // Returns true if this Operand is a wrapper for one register. + bool is_reg_only() const; + + // Asserts that this Operand is a wrapper for one register and returns the + // register. + Register reg() const; + private: - byte buf_[6]; - // The number of bytes in buf_. - unsigned int len_; - // Only valid if len_ > 4. - RelocInfo::Mode rmode_; + // reg + INLINE(explicit Operand(Register reg)); // Set the ModRM byte without an encoded 'reg' register. The // register is encoded later as part of the emit_operand operation. @@ -362,7 +365,15 @@ class Operand BASE_EMBEDDED { inline void set_disp8(int8_t disp); inline void set_dispr(int32_t disp, RelocInfo::Mode rmode); + byte buf_[6]; + // The number of bytes in buf_. + unsigned int len_; + // Only valid if len_ > 4. 
+ RelocInfo::Mode rmode_; + friend class Assembler; + friend class MacroAssembler; + friend class LCodeGen; }; @@ -671,7 +682,9 @@ class Assembler : public AssemblerBase { void leave(); // Moves + void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); } void mov_b(Register dst, const Operand& src); + void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); } void mov_b(const Operand& dst, int8_t imm8); void mov_b(const Operand& dst, Register src); @@ -687,17 +700,24 @@ class Assembler : public AssemblerBase { void mov(const Operand& dst, Handle<Object> handle); void mov(const Operand& dst, Register src); + void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); } void movsx_b(Register dst, const Operand& src); + void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); } void movsx_w(Register dst, const Operand& src); + void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); } void movzx_b(Register dst, const Operand& src); + void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); } void movzx_w(Register dst, const Operand& src); // Conditional moves void cmov(Condition cc, Register dst, int32_t imm32); void cmov(Condition cc, Register dst, Handle<Object> handle); + void cmov(Condition cc, Register dst, Register src) { + cmov(cc, dst, Operand(src)); + } void cmov(Condition cc, Register dst, const Operand& src); // Flag management. @@ -715,24 +735,31 @@ class Assembler : public AssemblerBase { void adc(Register dst, int32_t imm32); void adc(Register dst, const Operand& src); + void add(Register dst, Register src) { add(dst, Operand(src)); } void add(Register dst, const Operand& src); + void add(const Operand& dst, Register src); + void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); } void add(const Operand& dst, const Immediate& x); void and_(Register dst, int32_t imm32); void and_(Register dst, const Immediate& x); + void and_(Register dst, Register src) { and_(dst, Operand(src)); } void and_(Register dst, const Operand& src); - void and_(const Operand& src, Register dst); + void and_(const Operand& dst, Register src); void and_(const Operand& dst, const Immediate& x); + void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); } void cmpb(const Operand& op, int8_t imm8); - void cmpb(Register src, const Operand& dst); - void cmpb(const Operand& dst, Register src); + void cmpb(Register reg, const Operand& op); + void cmpb(const Operand& op, Register reg); void cmpb_al(const Operand& op); void cmpw_ax(const Operand& op); void cmpw(const Operand& op, Immediate imm16); void cmp(Register reg, int32_t imm32); void cmp(Register reg, Handle<Object> handle); + void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); } void cmp(Register reg, const Operand& op); + void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); } void cmp(const Operand& op, const Immediate& imm); void cmp(const Operand& op, Handle<Object> handle); @@ -748,6 +775,7 @@ class Assembler : public AssemblerBase { // Signed multiply instructions. void imul(Register src); // edx:eax = eax * src. + void imul(Register dst, Register src) { imul(dst, Operand(src)); } void imul(Register dst, const Operand& src); // dst = dst * src. void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32. 
@@ -764,8 +792,10 @@ class Assembler : public AssemblerBase { void not_(Register dst); void or_(Register dst, int32_t imm32); + void or_(Register dst, Register src) { or_(dst, Operand(src)); } void or_(Register dst, const Operand& src); void or_(const Operand& dst, Register src); + void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); } void or_(const Operand& dst, const Immediate& x); void rcl(Register dst, uint8_t imm8); @@ -776,35 +806,42 @@ class Assembler : public AssemblerBase { void sbb(Register dst, const Operand& src); + void shld(Register dst, Register src) { shld(dst, Operand(src)); } void shld(Register dst, const Operand& src); void shl(Register dst, uint8_t imm8); void shl_cl(Register dst); + void shrd(Register dst, Register src) { shrd(dst, Operand(src)); } void shrd(Register dst, const Operand& src); void shr(Register dst, uint8_t imm8); void shr_cl(Register dst); - void subb(const Operand& dst, int8_t imm8); - void subb(Register dst, const Operand& src); + void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); } void sub(const Operand& dst, const Immediate& x); + void sub(Register dst, Register src) { sub(dst, Operand(src)); } void sub(Register dst, const Operand& src); void sub(const Operand& dst, Register src); void test(Register reg, const Immediate& imm); + void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); } void test(Register reg, const Operand& op); void test_b(Register reg, const Operand& op); void test(const Operand& op, const Immediate& imm); + void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); } void test_b(const Operand& op, uint8_t imm8); void xor_(Register dst, int32_t imm32); + void xor_(Register dst, Register src) { xor_(dst, Operand(src)); } void xor_(Register dst, const Operand& src); - void xor_(const Operand& src, Register dst); + void xor_(const Operand& dst, Register src); + void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); } void xor_(const Operand& dst, const Immediate& x); // Bit operations. 
void bt(const Operand& dst, Register src); + void bts(Register dst, Register src) { bts(Operand(dst), src); } void bts(const Operand& dst, Register src); // Miscellaneous @@ -835,6 +872,7 @@ class Assembler : public AssemblerBase { void call(Label* L); void call(byte* entry, RelocInfo::Mode rmode); int CallSize(const Operand& adr); + void call(Register reg) { call(Operand(reg)); } void call(const Operand& adr); int CallSize(Handle<Code> code, RelocInfo::Mode mode); void call(Handle<Code> code, @@ -845,6 +883,7 @@ class Assembler : public AssemblerBase { // unconditional jump to L void jmp(Label* L, Label::Distance distance = Label::kFar); void jmp(byte* entry, RelocInfo::Mode rmode); + void jmp(Register reg) { jmp(Operand(reg)); } void jmp(const Operand& adr); void jmp(Handle<Code> code, RelocInfo::Mode rmode); @@ -929,6 +968,7 @@ class Assembler : public AssemblerBase { void cvttss2si(Register dst, const Operand& src); void cvttsd2si(Register dst, const Operand& src); + void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); } void cvtsi2sd(XMMRegister dst, const Operand& src); void cvtss2sd(XMMRegister dst, XMMRegister src); void cvtsd2ss(XMMRegister dst, XMMRegister src); @@ -969,12 +1009,14 @@ class Assembler : public AssemblerBase { void movdbl(XMMRegister dst, const Operand& src); void movdbl(const Operand& dst, XMMRegister src); + void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); } void movd(XMMRegister dst, const Operand& src); - void movd(const Operand& src, XMMRegister dst); + void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); } + void movd(const Operand& dst, XMMRegister src); void movsd(XMMRegister dst, XMMRegister src); void movss(XMMRegister dst, const Operand& src); - void movss(const Operand& src, XMMRegister dst); + void movss(const Operand& dst, XMMRegister src); void movss(XMMRegister dst, XMMRegister src); void pand(XMMRegister dst, XMMRegister src); @@ -987,11 +1029,17 @@ class Assembler : public AssemblerBase { void psrlq(XMMRegister reg, int8_t shift); void psrlq(XMMRegister dst, XMMRegister src); void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle); + void pextrd(Register dst, XMMRegister src, int8_t offset) { + pextrd(Operand(dst), src, offset); + } void pextrd(const Operand& dst, XMMRegister src, int8_t offset); + void pinsrd(XMMRegister dst, Register src, int8_t offset) { + pinsrd(dst, Operand(src), offset); + } void pinsrd(XMMRegister dst, const Operand& src, int8_t offset); // Parallel XMM operations. - void movntdqa(XMMRegister src, const Operand& dst); + void movntdqa(XMMRegister dst, const Operand& src); void movntdq(const Operand& dst, XMMRegister src); // Prefetch src position into cache level. // Level 1, 2 or 3 specifies CPU cache level. 
Level 0 specifies a @@ -1045,6 +1093,9 @@ class Assembler : public AssemblerBase { static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; + byte byte_at(int pos) { return buffer_[pos]; } + void set_byte_at(int pos, byte value) { buffer_[pos] = value; } + protected: bool emit_debug_code() const { return emit_debug_code_; } @@ -1057,9 +1108,8 @@ class Assembler : public AssemblerBase { byte* addr_at(int pos) { return buffer_ + pos; } + private: - byte byte_at(int pos) { return buffer_[pos]; } - void set_byte_at(int pos, byte value) { buffer_[pos] = value; } uint32_t long_at(int pos) { return *reinterpret_cast<uint32_t*>(addr_at(pos)); } diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 310ea3d123..53ade3a6c9 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, // JumpToExternalReference expects eax to contain the number of arguments // including the receiver and the extra arguments. - __ add(Operand(eax), Immediate(num_extra_args + 1)); + __ add(eax, Immediate(num_extra_args + 1)); __ JumpToExternalReference(ExternalReference(id, masm->isolate())); } @@ -80,25 +80,34 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- edi: constructor function // ----------------------------------- - Label non_function_call; + Label slow, non_function_call; // Check that function is not a smi. __ JumpIfSmi(edi, &non_function_call); // Check that function is a JSFunction. __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &non_function_call); + __ j(not_equal, &slow); // Jump to the function-specific construct stub. __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset)); __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize)); - __ jmp(Operand(ebx)); + __ jmp(ebx); // edi: called object // eax: number of arguments + // ecx: object map + Label do_call; + __ bind(&slow); + __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, &non_function_call); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + __ bind(&non_function_call); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); // Set expected number of arguments to zero (not changing eax). __ Set(ebx, Immediate(0)); - __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); Handle<Code> arguments_adaptor = masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); __ SetCallKind(ecx, CALL_AS_METHOD); @@ -113,264 +122,271 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ASSERT(!is_api_function || !count_constructions); // Enter a construct frame. - __ EnterConstructFrame(); + { + FrameScope scope(masm, StackFrame::CONSTRUCT); - // Store a smi-tagged arguments count on the stack. - __ SmiTag(eax); - __ push(eax); + // Store a smi-tagged arguments count on the stack. + __ SmiTag(eax); + __ push(eax); - // Push the function to invoke on the stack. - __ push(edi); + // Push the function to invoke on the stack. + __ push(edi); - // Try to allocate the object without transitioning into C code. If any of the - // preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + // Try to allocate the object without transitioning into C code. 
If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(masm->isolate()); - __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0)); - __ j(not_equal, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(masm->isolate()); + __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0)); + __ j(not_equal, &rt_call); #endif - // Verified that the constructor is a JSFunction. - // Load the initial map and verify that it is in fact a map. - // edi: constructor - __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); - // Will both indicate a NULL and a Smi - __ JumpIfSmi(eax, &rt_call); - // edi: constructor - // eax: initial map (if proven valid below) - __ CmpObjectType(eax, MAP_TYPE, ebx); - __ j(not_equal, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see comments - // in Runtime_NewObject in runtime.cc). In which case the initial map's - // instance type would be JS_FUNCTION_TYPE. - // edi: constructor - // eax: initial map - __ CmpInstanceType(eax, JS_FUNCTION_TYPE); - __ j(equal, &rt_call); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset)); - __ j(not_zero, &allocate); + // Verified that the constructor is a JSFunction. + // Load the initial map and verify that it is in fact a map. + // edi: constructor + __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi + __ JumpIfSmi(eax, &rt_call); + // edi: constructor + // eax: initial map (if proven valid below) + __ CmpObjectType(eax, MAP_TYPE, ebx); + __ j(not_equal, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // edi: constructor + // eax: initial map + __ CmpInstanceType(eax, JS_FUNCTION_TYPE); + __ j(equal, &rt_call); - __ push(eax); - __ push(edi); + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ dec_b(FieldOperand(ecx, + SharedFunctionInfo::kConstructionCountOffset)); + __ j(not_zero, &allocate); - __ push(edi); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + __ push(eax); + __ push(edi); - __ pop(edi); - __ pop(eax); + __ push(edi); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - __ bind(&allocate); - } + __ pop(edi); + __ pop(eax); - // Now allocate the JSObject on the heap. - // edi: constructor - // eax: initial map - __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); - __ shl(edi, kPointerSizeLog2); - __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS); - // Allocated the JSObject, now initialize the fields. 
- // eax: initial map - // ebx: JSObject - // edi: start of next object - __ mov(Operand(ebx, JSObject::kMapOffset), eax); - Factory* factory = masm->isolate()->factory(); - __ mov(ecx, factory->empty_fixed_array()); - __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx); - __ mov(Operand(ebx, JSObject::kElementsOffset), ecx); - // Set extra fields in the newly allocated object. - // eax: initial map - // ebx: JSObject - // edi: start of next object - { Label loop, entry; - // To allow for truncation. + __ bind(&allocate); + } + + // Now allocate the JSObject on the heap. + // edi: constructor + // eax: initial map + __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); + __ shl(edi, kPointerSizeLog2); + __ AllocateInNewSpace( + edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS); + // Allocated the JSObject, now initialize the fields. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ mov(Operand(ebx, JSObject::kMapOffset), eax); + Factory* factory = masm->isolate()->factory(); + __ mov(ecx, factory->empty_fixed_array()); + __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx); + __ mov(Operand(ebx, JSObject::kElementsOffset), ecx); + // Set extra fields in the newly allocated object. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); + __ mov(edx, factory->undefined_value()); if (count_constructions) { + __ movzx_b(esi, + FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); + __ lea(esi, + Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize)); + // esi: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmp(esi, edi); + __ Assert(less_equal, + "Unexpected number of pre-allocated property fields."); + } + __ InitializeFieldsWithFiller(ecx, esi, edx); __ mov(edx, factory->one_pointer_filler_map()); - } else { + } + __ InitializeFieldsWithFiller(ecx, edi, edx); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ or_(ebx, Immediate(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. + // Allocate and initialize a FixedArray if it is. + // eax: initial map + // ebx: JSObject + // edi: start of next object + // Calculate the total number of properties described by the map. + __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset)); + __ movzx_b(ecx, + FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); + __ add(edx, ecx); + // Calculate unused properties past the end of the in-object properties. + __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset)); + __ sub(edx, ecx); + // Done if no extra properties are to be allocated. + __ j(zero, &allocated); + __ Assert(positive, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // ebx: JSObject + // edi: start of next object (will be start of FixedArray) + // edx: number of elements in properties array + __ AllocateInNewSpace(FixedArray::kHeaderSize, + times_pointer_size, + edx, + edi, + ecx, + no_reg, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. 
+ // ebx: JSObject + // edi: FixedArray + // edx: number of elements + // ecx: start of next object + __ mov(eax, factory->fixed_array_map()); + __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map + __ SmiTag(edx); + __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length + + // Initialize the fields to undefined. + // ebx: JSObject + // edi: FixedArray + // ecx: start of next object + { Label loop, entry; __ mov(edx, factory->undefined_value()); + __ lea(eax, Operand(edi, FixedArray::kHeaderSize)); + __ jmp(&entry); + __ bind(&loop); + __ mov(Operand(eax, 0), edx); + __ add(eax, Immediate(kPointerSize)); + __ bind(&entry); + __ cmp(eax, ecx); + __ j(below, &loop); } - __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ mov(Operand(ecx, 0), edx); - __ add(Operand(ecx), Immediate(kPointerSize)); - __ bind(&entry); - __ cmp(ecx, Operand(edi)); - __ j(less, &loop); - } - - // Add the object tag to make the JSObject real, so that we can continue and - // jump into the continuation code at any time from now on. Any failures - // need to undo the allocation, so that the heap is in a consistent state - // and verifiable. - // eax: initial map - // ebx: JSObject - // edi: start of next object - __ or_(Operand(ebx), Immediate(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. - // Allocate and initialize a FixedArray if it is. - // eax: initial map - // ebx: JSObject - // edi: start of next object - // Calculate the total number of properties described by the map. - __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset)); - __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); - __ add(edx, Operand(ecx)); - // Calculate unused properties past the end of the in-object properties. - __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset)); - __ sub(edx, Operand(ecx)); - // Done if no extra properties are to be allocated. - __ j(zero, &allocated); - __ Assert(positive, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. - // ebx: JSObject - // edi: start of next object (will be start of FixedArray) - // edx: number of elements in properties array - __ AllocateInNewSpace(FixedArray::kHeaderSize, - times_pointer_size, - edx, - edi, - ecx, - no_reg, - &undo_allocation, - RESULT_CONTAINS_TOP); - - // Initialize the FixedArray. - // ebx: JSObject - // edi: FixedArray - // edx: number of elements - // ecx: start of next object - __ mov(eax, factory->fixed_array_map()); - __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map - __ SmiTag(edx); - __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length - - // Initialize the fields to undefined. 
- // ebx: JSObject - // edi: FixedArray - // ecx: start of next object - { Label loop, entry; - __ mov(edx, factory->undefined_value()); - __ lea(eax, Operand(edi, FixedArray::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ mov(Operand(eax, 0), edx); - __ add(Operand(eax), Immediate(kPointerSize)); - __ bind(&entry); - __ cmp(eax, Operand(ecx)); - __ j(below, &loop); - } - // Store the initialized FixedArray into the properties field of - // the JSObject - // ebx: JSObject - // edi: FixedArray - __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag - __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi); + // Store the initialized FixedArray into the properties field of + // the JSObject + // ebx: JSObject + // edi: FixedArray + __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag + __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi); - // Continue with JSObject being successfully allocated - // ebx: JSObject - __ jmp(&allocated); + // Continue with JSObject being successfully allocated + // ebx: JSObject + __ jmp(&allocated); - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // ebx: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(ebx); - } + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // ebx: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(ebx); + } - // Allocate the new receiver object using the runtime call. - __ bind(&rt_call); - // Must restore edi (constructor) before calling runtime. - __ mov(edi, Operand(esp, 0)); - // edi: function (constructor) - __ push(edi); - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(ebx, Operand(eax)); // store result in ebx + // Allocate the new receiver object using the runtime call. + __ bind(&rt_call); + // Must restore edi (constructor) before calling runtime. + __ mov(edi, Operand(esp, 0)); + // edi: function (constructor) + __ push(edi); + __ CallRuntime(Runtime::kNewObject, 1); + __ mov(ebx, eax); // store result in ebx - // New object allocated. - // ebx: newly allocated object - __ bind(&allocated); - // Retrieve the function from the stack. - __ pop(edi); + // New object allocated. + // ebx: newly allocated object + __ bind(&allocated); + // Retrieve the function from the stack. + __ pop(edi); - // Retrieve smi-tagged arguments count from the stack. - __ mov(eax, Operand(esp, 0)); - __ SmiUntag(eax); + // Retrieve smi-tagged arguments count from the stack. + __ mov(eax, Operand(esp, 0)); + __ SmiUntag(eax); - // Push the allocated receiver to the stack. We need two copies - // because we may have to return the original one and the calling - // conventions dictate that the called function pops the receiver. - __ push(ebx); - __ push(ebx); + // Push the allocated receiver to the stack. We need two copies + // because we may have to return the original one and the calling + // conventions dictate that the called function pops the receiver. + __ push(ebx); + __ push(ebx); - // Setup pointer to last argument. - __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); + // Setup pointer to last argument. + __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); - // Copy arguments and receiver to the expression stack. 
- Label loop, entry; - __ mov(ecx, Operand(eax)); - __ jmp(&entry); - __ bind(&loop); - __ push(Operand(ebx, ecx, times_4, 0)); - __ bind(&entry); - __ dec(ecx); - __ j(greater_equal, &loop); + // Copy arguments and receiver to the expression stack. + Label loop, entry; + __ mov(ecx, eax); + __ jmp(&entry); + __ bind(&loop); + __ push(Operand(ebx, ecx, times_4, 0)); + __ bind(&entry); + __ dec(ecx); + __ j(greater_equal, &loop); + + // Call the function. + if (is_api_function) { + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + } else { + ParameterCount actual(eax); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Call the function. - if (is_api_function) { - __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, - CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); - } else { - ParameterCount actual(eax); - __ InvokeFunction(edi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Restore context from the frame. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - // Restore context from the frame. - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; + // If the result is a smi, it is *not* an object in the ECMA sense. + __ JumpIfSmi(eax, &use_receiver); - // If the result is a smi, it is *not* an object in the ECMA sense. - __ JumpIfSmi(eax, &use_receiver); + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &exit); - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); - __ j(above_equal, &exit); + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ mov(eax, Operand(esp, 0)); - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ mov(eax, Operand(esp, 0)); + // Restore the arguments count and leave the construct frame. + __ bind(&exit); + __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count. - // Restore the arguments count and leave the construct frame. - __ bind(&exit); - __ mov(ebx, Operand(esp, kPointerSize)); // get arguments count - __ LeaveConstructFrame(); + // Leave construct frame. + } // Remove caller arguments from the stack and return. 
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); @@ -399,57 +415,58 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, bool is_construct) { - // Clear the context before we push it when entering the JS frame. + // Clear the context before we push it when entering the internal frame. __ Set(esi, Immediate(0)); - // Enter an internal frame. - __ EnterInternalFrame(); - - // Load the previous frame pointer (ebx) to access C arguments - __ mov(ebx, Operand(ebp, 0)); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Get the function from the frame and setup the context. - __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); - __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset)); + // Load the previous frame pointer (ebx) to access C arguments + __ mov(ebx, Operand(ebp, 0)); - // Push the function and the receiver onto the stack. - __ push(ecx); - __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset)); + // Get the function from the frame and setup the context. + __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); + __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset)); - // Load the number of arguments and setup pointer to the arguments. - __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset)); - __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset)); + // Push the function and the receiver onto the stack. + __ push(ecx); + __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset)); - // Copy arguments to the stack in a loop. - Label loop, entry; - __ Set(ecx, Immediate(0)); - __ jmp(&entry); - __ bind(&loop); - __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv - __ push(Operand(edx, 0)); // dereference handle - __ inc(Operand(ecx)); - __ bind(&entry); - __ cmp(ecx, Operand(eax)); - __ j(not_equal, &loop); + // Load the number of arguments and setup pointer to the arguments. + __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset)); + __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset)); - // Get the function from the stack and call it. - __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver + // Copy arguments to the stack in a loop. + Label loop, entry; + __ Set(ecx, Immediate(0)); + __ jmp(&entry); + __ bind(&loop); + __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv + __ push(Operand(edx, 0)); // dereference handle + __ inc(ecx); + __ bind(&entry); + __ cmp(ecx, eax); + __ j(not_equal, &loop); + + // Get the function from the stack and call it. + // kPointerSize for the receiver. + __ mov(edi, Operand(esp, eax, times_4, kPointerSize)); + + // Invoke the code. + if (is_construct) { + __ call(masm->isolate()->builtins()->JSConstructCall(), + RelocInfo::CODE_TARGET); + } else { + ParameterCount actual(eax); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Invoke the code. - if (is_construct) { - __ call(masm->isolate()->builtins()->JSConstructCall(), - RelocInfo::CODE_TARGET); - } else { - ParameterCount actual(eax); - __ InvokeFunction(edi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Exit the internal frame. Notice that this also removes the empty. + // context and the function left on the stack by the code + // invocation. } - - // Exit the JS frame. Notice that this also removes the empty - // context and the function left on the stack by the code - // invocation. 
- __ LeaveInternalFrame(); - __ ret(1 * kPointerSize); // remove receiver + __ ret(kPointerSize); // Remove receiver. } @@ -464,68 +481,68 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function. - __ push(edi); - // Push call kind information. - __ push(ecx); + // Push a copy of the function. + __ push(edi); + // Push call kind information. + __ push(ecx); - __ push(edi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyCompile, 1); + __ push(edi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyCompile, 1); - // Restore call kind information. - __ pop(ecx); - // Restore receiver. - __ pop(edi); + // Restore call kind information. + __ pop(ecx); + // Restore receiver. + __ pop(edi); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); - __ jmp(Operand(eax)); + __ jmp(eax); } void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function onto the stack. - __ push(edi); - // Push call kind information. - __ push(ecx); + // Push a copy of the function onto the stack. + __ push(edi); + // Push call kind information. + __ push(ecx); - __ push(edi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyRecompile, 1); + __ push(edi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyRecompile, 1); - // Restore call kind information. - __ pop(ecx); - // Restore receiver. - __ pop(edi); + // Restore call kind information. + __ pop(ecx); + // Restore receiver. + __ pop(edi); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); - __ jmp(Operand(eax)); + __ jmp(eax); } static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Pass the function and deoptimization type to the runtime system. - __ push(Immediate(Smi::FromInt(static_cast<int>(type)))); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + // Pass the function and deoptimization type to the runtime system. + __ push(Immediate(Smi::FromInt(static_cast<int>(type)))); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Get the full codegen state from the stack and untag it. __ mov(ecx, Operand(esp, 1 * kPointerSize)); @@ -566,9 +583,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ pushad(); - __ EnterInternalFrame(); - __ CallRuntime(Runtime::kNotifyOSR, 0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyOSR, 0); + } __ popad(); __ ret(0); } @@ -579,7 +597,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. 
Make sure we have at least one argument. { Label done; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &done); __ pop(ebx); __ push(Immediate(factory->undefined_value())); @@ -631,18 +649,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ j(above_equal, &shift_arguments); __ bind(&convert_to_object); - __ EnterInternalFrame(); // In order to preserve argument count. - __ SmiTag(eax); - __ push(eax); - __ push(ebx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(ebx, eax); - __ Set(edx, Immediate(0)); // restore + { // In order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(eax); + __ push(eax); + + __ push(ebx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(ebx, eax); + __ Set(edx, Immediate(0)); // restore + + __ pop(eax); + __ SmiUntag(eax); + } - __ pop(eax); - __ SmiUntag(eax); - __ LeaveInternalFrame(); // Restore the function to edi. __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize)); __ jmp(&patch_receiver); @@ -695,11 +716,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, // or a function proxy via CALL_FUNCTION_PROXY. { Label function, non_proxy; - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(zero, &function); __ Set(ebx, Immediate(0)); __ SetCallKind(ecx, CALL_AS_METHOD); - __ cmp(Operand(edx), Immediate(1)); + __ cmp(edx, Immediate(1)); __ j(not_equal, &non_proxy); __ pop(edx); // return address @@ -726,13 +747,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); __ SmiUntag(ebx); __ SetCallKind(ecx, CALL_AS_METHOD); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(not_equal, masm->isolate()->builtins()->ArgumentsAdaptorTrampoline()); ParameterCount expected(0); - __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(), + CALL_AS_METHOD); } @@ -740,155 +761,156 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { static const int kArgumentsOffset = 2 * kPointerSize; static const int kReceiverOffset = 3 * kPointerSize; static const int kFunctionOffset = 4 * kPointerSize; + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + + __ push(Operand(ebp, kFunctionOffset)); // push this + __ push(Operand(ebp, kArgumentsOffset)); // push arguments + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + ExternalReference real_stack_limit = + ExternalReference::address_of_real_stack_limit(masm->isolate()); + __ mov(edi, Operand::StaticVariable(real_stack_limit)); + // Make ecx the space we have left. The stack might already be overflowed + // here which will cause ecx to become negative. + __ mov(ecx, esp); + __ sub(ecx, edi); + // Make edx the space we need for the array when it is unrolled onto the + // stack. + __ mov(edx, eax); + __ shl(edx, kPointerSizeLog2 - kSmiTagSize); + // Check if the arguments will overflow the stack. + __ cmp(ecx, edx); + __ j(greater, &okay); // Signed comparison. + + // Out of stack space. 
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this + __ push(eax); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ bind(&okay); + // End of stack check. + + // Push current index and limit. + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; + const int kIndexOffset = kLimitOffset - 1 * kPointerSize; + __ push(eax); // limit + __ push(Immediate(0)); // index + + // Get the receiver. + __ mov(ebx, Operand(ebp, kReceiverOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ mov(edi, Operand(ebp, kFunctionOffset)); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &push_receiver); + + // Change context eagerly to get the right global object if necessary. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - __ EnterInternalFrame(); - - __ push(Operand(ebp, kFunctionOffset)); // push this - __ push(Operand(ebp, kArgumentsOffset)); // push arguments - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - ExternalReference real_stack_limit = - ExternalReference::address_of_real_stack_limit(masm->isolate()); - __ mov(edi, Operand::StaticVariable(real_stack_limit)); - // Make ecx the space we have left. The stack might already be overflowed - // here which will cause ecx to become negative. - __ mov(ecx, Operand(esp)); - __ sub(ecx, Operand(edi)); - // Make edx the space we need for the array when it is unrolled onto the - // stack. - __ mov(edx, Operand(eax)); - __ shl(edx, kPointerSizeLog2 - kSmiTagSize); - // Check if the arguments will overflow the stack. - __ cmp(ecx, Operand(edx)); - __ j(greater, &okay); // Signed comparison. - - // Out of stack space. - __ push(Operand(ebp, 4 * kPointerSize)); // push this - __ push(eax); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - __ bind(&okay); - // End of stack check. - - // Push current index and limit. - const int kLimitOffset = - StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; - const int kIndexOffset = kLimitOffset - 1 * kPointerSize; - __ push(eax); // limit - __ push(Immediate(0)); // index - - // Get the receiver. - __ mov(ebx, Operand(ebp, kReceiverOffset)); - - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ mov(edi, Operand(ebp, kFunctionOffset)); - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &push_receiver); + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &push_receiver); - // Change context eagerly to get the right global object if necessary. - __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + Factory* factory = masm->isolate()->factory(); - // Compute the receiver. - // Do not transform the receiver for strict mode functions. 
- Label call_to_object, use_global_receiver; - __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), - 1 << SharedFunctionInfo::kStrictModeBitWithinByte); - __ j(not_equal, &push_receiver); + // Do not transform the receiver for natives (shared already in ecx). + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, &push_receiver); - Factory* factory = masm->isolate()->factory(); + // Compute the receiver in non-strict mode. + // Call ToObject on the receiver if it is not an object, or use the + // global object if it is null or undefined. + __ JumpIfSmi(ebx, &call_to_object); + __ cmp(ebx, factory->null_value()); + __ j(equal, &use_global_receiver); + __ cmp(ebx, factory->undefined_value()); + __ j(equal, &use_global_receiver); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &push_receiver); - // Do not transform the receiver for natives (shared already in ecx). - __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), - 1 << SharedFunctionInfo::kNativeBitWithinByte); - __ j(not_equal, &push_receiver); - - // Compute the receiver in non-strict mode. - // Call ToObject on the receiver if it is not an object, or use the - // global object if it is null or undefined. - __ JumpIfSmi(ebx, &call_to_object); - __ cmp(ebx, factory->null_value()); - __ j(equal, &use_global_receiver); - __ cmp(ebx, factory->undefined_value()); - __ j(equal, &use_global_receiver); - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); - __ j(above_equal, &push_receiver); - - __ bind(&call_to_object); - __ push(ebx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(ebx, Operand(eax)); - __ jmp(&push_receiver); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ mov(ebx, FieldOperand(esi, kGlobalOffset)); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset)); - __ mov(ebx, FieldOperand(ebx, kGlobalOffset)); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - __ bind(&push_receiver); - __ push(ebx); - - // Copy all arguments from the array to the stack. - Label entry, loop; - __ mov(eax, Operand(ebp, kIndexOffset)); - __ jmp(&entry); - __ bind(&loop); - __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments + __ bind(&call_to_object); + __ push(ebx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(ebx, eax); + __ jmp(&push_receiver); - // Use inline caching to speed up access to arguments. - Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); - __ call(ic, RelocInfo::CODE_TARGET); - // It is important that we do not have a test instruction after the - // call. A test instruction after the call is used to indicate that - // we have generated an inline version of the keyed load. In this - // case, we know that we are not generating a test instruction next. + // Use the current global receiver object as the receiver. 
+ __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ mov(ebx, FieldOperand(esi, kGlobalOffset)); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset)); + __ mov(ebx, FieldOperand(ebx, kGlobalOffset)); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); - // Push the nth argument. - __ push(eax); + // Push the receiver. + __ bind(&push_receiver); + __ push(ebx); - // Update the index on the stack and in register eax. - __ mov(eax, Operand(ebp, kIndexOffset)); - __ add(Operand(eax), Immediate(1 << kSmiTagSize)); - __ mov(Operand(ebp, kIndexOffset), eax); + // Copy all arguments from the array to the stack. + Label entry, loop; + __ mov(eax, Operand(ebp, kIndexOffset)); + __ jmp(&entry); + __ bind(&loop); + __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments - __ bind(&entry); - __ cmp(eax, Operand(ebp, kLimitOffset)); - __ j(not_equal, &loop); + // Use inline caching to speed up access to arguments. + Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); + __ call(ic, RelocInfo::CODE_TARGET); + // It is important that we do not have a test instruction after the + // call. A test instruction after the call is used to indicate that + // we have generated an inline version of the keyed load. In this + // case, we know that we are not generating a test instruction next. - // Invoke the function. - Label call_proxy; - ParameterCount actual(eax); - __ SmiUntag(eax); - __ mov(edi, Operand(ebp, kFunctionOffset)); - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &call_proxy); - __ InvokeFunction(edi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Push the nth argument. + __ push(eax); - __ LeaveInternalFrame(); - __ ret(3 * kPointerSize); // remove this, receiver, and arguments + // Update the index on the stack and in register eax. + __ mov(eax, Operand(ebp, kIndexOffset)); + __ add(eax, Immediate(1 << kSmiTagSize)); + __ mov(Operand(ebp, kIndexOffset), eax); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(edi); // add function proxy as last argument - __ inc(eax); - __ Set(ebx, Immediate(0)); - __ SetCallKind(ecx, CALL_AS_METHOD); - __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); - __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + __ bind(&entry); + __ cmp(eax, Operand(ebp, kLimitOffset)); + __ j(not_equal, &loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(eax); + __ SmiUntag(eax); + __ mov(edi, Operand(ebp, kFunctionOffset)); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &call_proxy); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + frame_scope.GenerateLeaveFrame(); + __ ret(3 * kPointerSize); // remove this, receiver, and arguments + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(edi); // add function proxy as last argument + __ inc(eax); + __ Set(ebx, Immediate(0)); + __ SetCallKind(ecx, CALL_AS_METHOD); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); + __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); - __ LeaveInternalFrame(); + // Leave internal frame. 
+ } __ ret(3 * kPointerSize); // remove this, receiver, and arguments } @@ -983,9 +1005,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, __ jmp(&entry); __ bind(&loop); __ mov(Operand(scratch1, 0), factory->the_hole_value()); - __ add(Operand(scratch1), Immediate(kPointerSize)); + __ add(scratch1, Immediate(kPointerSize)); __ bind(&entry); - __ cmp(scratch1, Operand(scratch2)); + __ cmp(scratch1, scratch2); __ j(below, &loop); } } @@ -1082,7 +1104,7 @@ static void AllocateJSArray(MacroAssembler* masm, __ bind(&loop); __ stos(); __ bind(&entry); - __ cmp(edi, Operand(elements_array_end)); + __ cmp(edi, elements_array_end); __ j(below, &loop); __ bind(&done); } @@ -1120,7 +1142,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ push(eax); // Check for array construction with zero arguments. - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &argc_one_or_more); __ bind(&empty_array); @@ -1147,7 +1169,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ j(not_equal, &argc_two_or_more); STATIC_ASSERT(kSmiTag == 0); __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize)); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, ¬_empty_array); // The single argument passed is zero, so we jump to the code above used to @@ -1160,7 +1182,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ mov(eax, Operand(esp, i * kPointerSize)); __ mov(Operand(esp, (i + 1) * kPointerSize), eax); } - __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots. + __ add(esp, Immediate(2 * kPointerSize)); // Drop two stack slots. __ push(Immediate(0)); // Treat this as a call with argc of zero. __ jmp(&empty_array); @@ -1250,7 +1272,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ bind(&loop); __ mov(eax, Operand(edi, ecx, times_pointer_size, 0)); __ mov(Operand(edx, 0), eax); - __ add(Operand(edx), Immediate(kPointerSize)); + __ add(edx, Immediate(kPointerSize)); __ bind(&entry); __ dec(ecx); __ j(greater_equal, &loop); @@ -1356,14 +1378,14 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { if (FLAG_debug_code) { __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx); - __ cmp(edi, Operand(ecx)); + __ cmp(edi, ecx); __ Assert(equal, "Unexpected String function"); } // Load the first argument into eax and get rid of the rest // (including the receiver). Label no_arguments; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &no_arguments); __ mov(ebx, Operand(esp, eax, times_pointer_size, 0)); __ pop(ecx); @@ -1439,12 +1461,13 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Invoke the conversion builtin and put the result into ebx. __ bind(&convert_argument); __ IncrementCounter(counters->string_ctor_conversions(), 1); - __ EnterInternalFrame(); - __ push(edi); // Preserve the function. - __ push(eax); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - __ pop(edi); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edi); // Preserve the function. + __ push(eax); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + __ pop(edi); + } __ mov(ebx, eax); __ jmp(&argument_is_string); @@ -1461,17 +1484,18 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. 
__ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1); - __ EnterInternalFrame(); - __ push(ebx); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(ebx); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } __ ret(0); } static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ push(ebp); - __ mov(ebp, Operand(esp)); + __ mov(ebp, esp); // Store the arguments adaptor context sentinel. __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); @@ -1515,7 +1539,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1); Label enough, too_few; - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(less, &too_few); __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel); __ j(equal, &dont_adapt_arguments); @@ -1533,8 +1557,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(©); __ inc(edi); __ push(Operand(eax, 0)); - __ sub(Operand(eax), Immediate(kPointerSize)); - __ cmp(edi, Operand(ebx)); + __ sub(eax, Immediate(kPointerSize)); + __ cmp(edi, ebx); __ j(less, ©); __ jmp(&invoke); } @@ -1547,17 +1571,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { const int offset = StandardFrameConstants::kCallerSPOffset; __ lea(edi, Operand(ebp, eax, times_4, offset)); // ebx = expected - actual. - __ sub(ebx, Operand(eax)); + __ sub(ebx, eax); // eax = -actual - 1 __ neg(eax); - __ sub(Operand(eax), Immediate(1)); + __ sub(eax, Immediate(1)); Label copy; __ bind(©); __ inc(eax); __ push(Operand(edi, 0)); - __ sub(Operand(edi), Immediate(kPointerSize)); - __ test(eax, Operand(eax)); + __ sub(edi, Immediate(kPointerSize)); + __ test(eax, eax); __ j(not_zero, ©); // Fill remaining expected arguments with undefined values. @@ -1565,7 +1589,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&fill); __ inc(eax); __ push(Immediate(masm->isolate()->factory()->undefined_value())); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(less, &fill); } @@ -1573,7 +1597,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&invoke); // Restore function pointer. __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - __ call(Operand(edx)); + __ call(edx); // Leave frame and return. LeaveArgumentsAdaptorFrame(masm); @@ -1583,13 +1607,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Dont adapt arguments. // ------------------------------------------- __ bind(&dont_adapt_arguments); - __ jmp(Operand(edx)); + __ jmp(edx); } void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { CpuFeatures::TryForceFeatureScope scope(SSE2); - if (!CpuFeatures::IsSupported(SSE2)) { + if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) { __ Abort("Unreachable code: Cannot optimize without SSE2 support."); return; } @@ -1616,15 +1640,16 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Pass the function to optimize as the argument to the on-stack // replacement runtime function. 
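The hunk below, like several earlier ones in this file, replaces a matched EnterInternalFrame()/LeaveInternalFrame() pair with a block-scoped FrameScope, so the frame teardown is tied to the C++ scope of the generator code rather than to a second explicit call. A minimal RAII sketch of the pattern, with hypothetical names (this is not the real MacroAssembler API):

#include <cstdio>

// Hypothetical assembler stand-in; the real code emits frame setup/teardown.
struct Masm {
  void EnterInternalFrame() { std::puts("push ebp; mov ebp, esp; ..."); }
  void LeaveInternalFrame() { std::puts("mov esp, ebp; pop ebp"); }
};

// RAII guard: constructing it enters the frame, leaving the block exits it.
class FrameScope {
 public:
  explicit FrameScope(Masm* masm) : masm_(masm) { masm_->EnterInternalFrame(); }
  ~FrameScope() { masm_->LeaveInternalFrame(); }
 private:
  Masm* masm_;
};

void GenerateStub(Masm* masm) {
  {
    FrameScope scope(masm);      // was: masm->EnterInternalFrame();
    std::puts("call runtime");   // body emitted inside the frame
  }                              // was: masm->LeaveInternalFrame();
  std::puts("ret");
}

int main() { Masm masm; GenerateStub(&masm); return 0; }

The Generate_FunctionApply hunk earlier additionally calls frame_scope.GenerateLeaveFrame() explicitly before a ret that exits while the scope is still syntactically open, so the frame is unwound on that early return as well.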
- __ EnterInternalFrame(); - __ push(eax); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(eax); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. Label skip; - __ cmp(Operand(eax), Immediate(Smi::FromInt(-1))); + __ cmp(eax, Immediate(Smi::FromInt(-1))); __ j(not_equal, &skip, Label::kNear); __ ret(0); @@ -1638,7 +1663,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { __ j(above_equal, &ok, Label::kNear); StackCheckStub stub; __ TailCallStub(&stub); - __ Abort("Unreachable code: returned from tail call."); + if (FLAG_debug_code) { + __ Abort("Unreachable code: returned from tail call."); + } __ bind(&ok); __ ret(0); diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 1009aaf573..1e886e202b 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -49,7 +49,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) { __ bind(&check_heap_number); __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); + __ cmp(ebx, Immediate(factory->heap_number_map())); __ j(not_equal, &call_builtin, Label::kNear); __ ret(0); @@ -150,7 +150,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } // Return and remove the on-stack parameter. - __ mov(esi, Operand(eax)); + __ mov(esi, eax); __ ret(1 * kPointerSize); // Need to collect. Call into runtime system. @@ -159,6 +159,77 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [esp + (1 * kPointerSize)]: function + // [esp + (2 * kPointerSize)]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + eax, ebx, ecx, &gc, TAG_OBJECT); + + // Get the function or sentinel from the stack. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + + // Get the serialized scope info from the stack. + __ mov(ebx, Operand(esp, 2 * kPointerSize)); + + // Setup the object header. + Factory* factory = masm->isolate()->factory(); + __ mov(FieldOperand(eax, HeapObject::kMapOffset), + factory->block_context_map()); + __ mov(FieldOperand(eax, Context::kLengthOffset), + Immediate(Smi::FromInt(length))); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmp(ecx, 0); + __ Assert(equal, message); + } + __ mov(ecx, GlobalObjectOperand()); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); + __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Setup the fixed slots. 
+ __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx); + __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi); + __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx); + + // Copy the global object from the previous context. + __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX)); + __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx); + + // Initialize the rest of the slots to the hole value. + if (slots_ == 1) { + __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS), + factory->the_hole_value()); + } else { + __ mov(ebx, factory->the_hole_value()); + for (int i = 0; i < slots_; i++) { + __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx); + } + } + + // Return and remove the on-stack parameters. + __ mov(esi, eax); + __ ret(2 * kPointerSize); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -239,6 +310,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // The stub expects its argument on the stack and returns its result in tos_: // zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. Label patch; Factory* factory = masm->isolate()->factory(); const Register argument = eax; @@ -336,6 +409,41 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ pushad(); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + XMMRegister reg = XMMRegister::from_code(i); + __ movdbl(Operand(esp, i * kDoubleSize), reg); + } + } + const int argument_count = 1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, ecx); + __ mov(Operand(esp, 0 * kPointerSize), + Immediate(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + XMMRegister reg = XMMRegister::from_code(i); + __ movdbl(reg, Operand(esp, i * kDoubleSize)); + } + __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + } + __ popad(); + __ ret(0); +} + + void ToBooleanStub::CheckOddball(MacroAssembler* masm, Type type, Heap::RootListIndex value, @@ -470,27 +578,27 @@ static void IntegerConvert(MacroAssembler* masm, // Check whether the exponent is too big for a 64 bit signed integer. static const uint32_t kTooBigExponent = (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); + __ cmp(scratch2, Immediate(kTooBigExponent)); __ j(greater_equal, conversion_failure); // Load x87 register with heap number. __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); // Reserve space for 64 bit answer. - __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint. 
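IntegerConvert, whose hunks begin above and continue below, truncates a heap number toward zero: with SSE3 it spills through a 64-bit stack slot and uses fisttp after checking that the exponent is below bias + 63, otherwise it decodes the IEEE-754 exponent by hand and shifts the mantissa into place. A standalone sketch of the bit-level idea for the manual path, assuming the standard 1023 exponent bias and 52-bit mantissa and ignoring subnormals:

#include <cassert>
#include <cstdint>
#include <cstring>

// Truncate a finite double with |x| < 2^31 toward zero using only bit tests,
// mirroring the exponent/mantissa decoding in the stub (simplified sketch).
int32_t TruncateToInt32(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  int sign = (bits >> 63) ? -1 : 1;
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbias
  if (exponent < 0) return 0;                        // |x| < 1 truncates to 0
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  // The integer part is the top (exponent + 1) bits of the 53-bit mantissa.
  return sign * static_cast<int32_t>(mantissa >> (52 - exponent));
}

int main() {
  assert(TruncateToInt32(3.75) == 3);
  assert(TruncateToInt32(-2.5) == -2);
  assert(TruncateToInt32(0.9) == 0);
  return 0;
}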
// Do conversion, which cannot fail because we checked the exponent. __ fisttp_d(Operand(esp, 0)); __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. - __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + __ add(esp, Immediate(sizeof(uint64_t))); // Nolint. } else { // Load ecx with zero. We use this either for the final shift or // for the answer. - __ xor_(ecx, Operand(ecx)); + __ xor_(ecx, ecx); // Check whether the exponent matches a 32 bit signed int that cannot be // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the // exponent is 30 (biased). This is the exponent that we are fastest at and // also the highest exponent we can handle here. const uint32_t non_smi_exponent = (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); + __ cmp(scratch2, Immediate(non_smi_exponent)); // If we have a match of the int32-but-not-Smi exponent then skip some // logic. __ j(equal, &right_exponent, Label::kNear); @@ -503,7 +611,7 @@ static void IntegerConvert(MacroAssembler* masm, // >>> operator has a tendency to generate numbers with an exponent of 31. const uint32_t big_non_smi_exponent = (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); + __ cmp(scratch2, Immediate(big_non_smi_exponent)); __ j(not_equal, conversion_failure); // We have the big exponent, typically from >>>. This means the number is // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. @@ -522,9 +630,9 @@ static void IntegerConvert(MacroAssembler* masm, // Shift down 21 bits to get the most significant 11 bits or the low // mantissa word. __ shr(ecx, 32 - big_shift_distance); - __ or_(ecx, Operand(scratch2)); + __ or_(ecx, scratch2); // We have the answer in ecx, but we may need to negate it. - __ test(scratch, Operand(scratch)); + __ test(scratch, scratch); __ j(positive, &done, Label::kNear); __ neg(ecx); __ jmp(&done, Label::kNear); @@ -538,14 +646,14 @@ static void IntegerConvert(MacroAssembler* masm, // it rounds to zero. const uint32_t zero_exponent = (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; - __ sub(Operand(scratch2), Immediate(zero_exponent)); + __ sub(scratch2, Immediate(zero_exponent)); // ecx already has a Smi zero. __ j(less, &done, Label::kNear); // We have a shifted exponent between 0 and 30 in scratch2. __ shr(scratch2, HeapNumber::kExponentShift); __ mov(ecx, Immediate(30)); - __ sub(ecx, Operand(scratch2)); + __ sub(ecx, scratch2); __ bind(&right_exponent); // Here ecx is the shift, scratch is the exponent word. @@ -565,19 +673,19 @@ static void IntegerConvert(MacroAssembler* masm, // Shift down 22 bits to get the most significant 10 bits or the low // mantissa word. __ shr(scratch2, 32 - shift_distance); - __ or_(scratch2, Operand(scratch)); + __ or_(scratch2, scratch); // Move down according to the exponent. __ shr_cl(scratch2); // Now the unsigned answer is in scratch2. We need to move it to ecx and // we may need to fix the sign. 
Label negative; - __ xor_(ecx, Operand(ecx)); + __ xor_(ecx, ecx); __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); __ j(greater, &negative, Label::kNear); __ mov(ecx, scratch2); __ jmp(&done, Label::kNear); __ bind(&negative); - __ sub(ecx, Operand(scratch2)); + __ sub(ecx, scratch2); __ bind(&done); } } @@ -679,13 +787,13 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, __ JumpIfNotSmi(eax, non_smi, non_smi_near); // We can't handle -0 with smis, so use a type transition for that case. - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, slow, slow_near); // Try optimistic subtraction '0 - value', saving operand in eax for undo. - __ mov(edx, Operand(eax)); + __ mov(edx, eax); __ Set(eax, Immediate(0)); - __ sub(eax, Operand(edx)); + __ sub(eax, edx); __ j(overflow, undo, undo_near); __ ret(0); } @@ -706,7 +814,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot( void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) { - __ mov(eax, Operand(edx)); + __ mov(eax, edx); } @@ -760,7 +868,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(HeapNumber::kSignMask)); // Flip sign. } else { - __ mov(edx, Operand(eax)); + __ mov(edx, eax); // edx: operand Label slow_allocate_heapnumber, heapnumber_allocated; @@ -768,11 +876,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated, Label::kNear); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(edx); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ pop(edx); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edx); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ pop(edx); + } __ bind(&heapnumber_allocated); // eax: allocated 'empty' number @@ -815,15 +924,16 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - // Push the original HeapNumber on the stack. The integer value can't - // be stored since it's untagged and not in the smi range (so we can't - // smi-tag it). We'll recalculate the value after the GC instead. - __ push(ebx); - __ CallRuntime(Runtime::kNumberAlloc, 0); - // New HeapNumber is in eax. - __ pop(edx); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push the original HeapNumber on the stack. The integer value can't + // be stored since it's untagged and not in the smi range (so we can't + // smi-tag it). We'll recalculate the value after the GC instead. + __ push(ebx); + __ CallRuntime(Runtime::kNumberAlloc, 0); + // New HeapNumber is in eax. + __ pop(edx); + } // IntegerConvert uses ebx and edi as scratch registers. // This conversion won't go slow-case. IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow); @@ -833,7 +943,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, } if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ecx)); + __ cvtsi2sd(xmm0, ecx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ push(ecx); @@ -947,6 +1057,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. 
+ AllowStubCallsScope allow_stub_calls(masm, true); + switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -1022,7 +1136,7 @@ void BinaryOpStub::GenerateSmiCode( // eax in case the result is not a smi. ASSERT(!left.is(ecx) && !right.is(ecx)); __ mov(ecx, right); - __ or_(right, Operand(left)); // Bitwise or is commutative. + __ or_(right, left); // Bitwise or is commutative. combined = right; break; @@ -1034,7 +1148,7 @@ void BinaryOpStub::GenerateSmiCode( case Token::DIV: case Token::MOD: __ mov(combined, right); - __ or_(combined, Operand(left)); + __ or_(combined, left); break; case Token::SHL: @@ -1044,7 +1158,7 @@ void BinaryOpStub::GenerateSmiCode( // for the smi check register. ASSERT(!left.is(ecx) && !right.is(ecx)); __ mov(ecx, right); - __ or_(right, Operand(left)); + __ or_(right, left); combined = right; break; @@ -1067,12 +1181,12 @@ void BinaryOpStub::GenerateSmiCode( case Token::BIT_XOR: ASSERT(right.is(eax)); - __ xor_(right, Operand(left)); // Bitwise xor is commutative. + __ xor_(right, left); // Bitwise xor is commutative. break; case Token::BIT_AND: ASSERT(right.is(eax)); - __ and_(right, Operand(left)); // Bitwise and is commutative. + __ and_(right, left); // Bitwise and is commutative. break; case Token::SHL: @@ -1121,12 +1235,12 @@ void BinaryOpStub::GenerateSmiCode( case Token::ADD: ASSERT(right.is(eax)); - __ add(right, Operand(left)); // Addition is commutative. + __ add(right, left); // Addition is commutative. __ j(overflow, &use_fp_on_smis); break; case Token::SUB: - __ sub(left, Operand(right)); + __ sub(left, right); __ j(overflow, &use_fp_on_smis); __ mov(eax, left); break; @@ -1140,7 +1254,7 @@ void BinaryOpStub::GenerateSmiCode( // Remove tag from one of the operands (but keep sign). __ SmiUntag(right); // Do multiplication. - __ imul(right, Operand(left)); // Multiplication is commutative. + __ imul(right, left); // Multiplication is commutative. __ j(overflow, &use_fp_on_smis); // Check for negative zero result. Use combined = left | right. __ NegativeZeroTest(right, combined, &use_fp_on_smis); @@ -1151,7 +1265,7 @@ void BinaryOpStub::GenerateSmiCode( // save the left operand. __ mov(edi, left); // Check for 0 divisor. - __ test(right, Operand(right)); + __ test(right, right); __ j(zero, &use_fp_on_smis); // Sign extend left into edx:eax. ASSERT(left.is(eax)); @@ -1167,7 +1281,7 @@ void BinaryOpStub::GenerateSmiCode( // Check for negative zero result. Use combined = left | right. __ NegativeZeroTest(eax, combined, &use_fp_on_smis); // Check that the remainder is zero. - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(not_zero, &use_fp_on_smis); // Tag the result and store it in register eax. __ SmiTag(eax); @@ -1175,7 +1289,7 @@ void BinaryOpStub::GenerateSmiCode( case Token::MOD: // Check for 0 divisor. - __ test(right, Operand(right)); + __ test(right, right); __ j(zero, ¬_smis); // Sign extend left into edx:eax. @@ -1226,11 +1340,11 @@ void BinaryOpStub::GenerateSmiCode( break; case Token::ADD: // Revert right = right + left. - __ sub(right, Operand(left)); + __ sub(right, left); break; case Token::SUB: // Revert left = left - right. - __ add(left, Operand(right)); + __ add(left, right); break; case Token::MUL: // Right was clobbered but a copy is in ebx. 
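The smi fast paths above perform the arithmetic directly on the tagged values and bail out to the double path on overflow, first undoing the optimistic operation (the "Revert right = right + left" hunks). Because an ia32 smi is the value shifted left by one, tagged addition and subtraction give the correctly tagged result whenever the untagged sum still fits. A sketch of the same idea using a compiler overflow intrinsic (an assumption: GCC/Clang's __builtin_add_overflow; the stub itself branches on the x86 overflow flag):

#include <cstdint>
#include <cstdio>

// Tagged smis hold value << 1, so adding two tagged smis yields the tagged sum.
bool SmiAdd(int32_t left_tagged, int32_t right_tagged, int32_t* result_tagged) {
  int32_t sum;
  if (__builtin_add_overflow(left_tagged, right_tagged, &sum)) {
    // Overflow: the stub reverts the clobbered operand and retries with
    // doubles (the use_fp_on_smis path).
    return false;
  }
  *result_tagged = sum;
  return true;
}

int main() {
  int32_t r;
  std::printf("%d\n", SmiAdd(3 << 1, 4 << 1, &r) ? (r >> 1) : -1);  // prints 7
  return 0;
}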
@@ -1268,7 +1382,7 @@ void BinaryOpStub::GenerateSmiCode( ASSERT_EQ(Token::SHL, op_); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(left)); + __ cvtsi2sd(xmm0, left); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), left); @@ -1290,11 +1404,11 @@ void BinaryOpStub::GenerateSmiCode( switch (op_) { case Token::ADD: // Revert right = right + left. - __ sub(right, Operand(left)); + __ sub(right, left); break; case Token::SUB: // Revert left = left - right. - __ add(left, Operand(right)); + __ add(left, right); break; case Token::MUL: // Right was clobbered but a copy is in ebx. @@ -1486,7 +1600,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Check result type if it is currently Int32. if (result_type_ <= BinaryOpIC::INT32) { __ cvttsd2si(ecx, Operand(xmm0)); - __ cvtsi2sd(xmm2, Operand(ecx)); + __ cvtsi2sd(xmm2, ecx); __ ucomisd(xmm0, xmm2); __ j(not_zero, ¬_int32); __ j(carry, ¬_int32); @@ -1548,9 +1662,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_, ¬_int32); switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::BIT_OR: __ or_(eax, ecx); break; + case Token::BIT_AND: __ and_(eax, ecx); break; + case Token::BIT_XOR: __ xor_(eax, ecx); break; case Token::SAR: __ sar_cl(eax); break; case Token::SHL: __ shl_cl(eax); break; case Token::SHR: __ shr_cl(eax); break; @@ -1574,7 +1688,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { if (op_ != Token::SHR) { __ bind(&non_smi_result); // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result + __ mov(ebx, eax); // ebx: result Label skip_allocation; switch (mode_) { case OVERWRITE_LEFT: @@ -1594,7 +1708,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Store the result in the HeapNumber and return. 
if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); + __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), ebx); @@ -1675,7 +1789,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { __ cmp(edx, factory->undefined_value()); __ j(not_equal, &check, Label::kNear); if (Token::IsBitOp(op_)) { - __ xor_(edx, Operand(edx)); + __ xor_(edx, edx); } else { __ mov(edx, Immediate(factory->nan_value())); } @@ -1684,7 +1798,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { __ cmp(eax, factory->undefined_value()); __ j(not_equal, &done, Label::kNear); if (Token::IsBitOp(op_)) { - __ xor_(eax, Operand(eax)); + __ xor_(eax, eax); } else { __ mov(eax, Immediate(factory->nan_value())); } @@ -1762,9 +1876,9 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { use_sse3_, ¬_floats); switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::BIT_OR: __ or_(eax, ecx); break; + case Token::BIT_AND: __ and_(eax, ecx); break; + case Token::BIT_XOR: __ xor_(eax, ecx); break; case Token::SAR: __ sar_cl(eax); break; case Token::SHL: __ shl_cl(eax); break; case Token::SHR: __ shr_cl(eax); break; @@ -1788,7 +1902,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { if (op_ != Token::SHR) { __ bind(&non_smi_result); // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result + __ mov(ebx, eax); // ebx: result Label skip_allocation; switch (mode_) { case OVERWRITE_LEFT: @@ -1808,7 +1922,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { // Store the result in the HeapNumber and return. if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); + __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), ebx); @@ -1961,9 +2075,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { use_sse3_, &call_runtime); switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::BIT_OR: __ or_(eax, ecx); break; + case Token::BIT_AND: __ and_(eax, ecx); break; + case Token::BIT_XOR: __ xor_(eax, ecx); break; case Token::SAR: __ sar_cl(eax); break; case Token::SHL: __ shl_cl(eax); break; case Token::SHR: __ shr_cl(eax); break; @@ -1987,7 +2101,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { if (op_ != Token::SHR) { __ bind(&non_smi_result); // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result + __ mov(ebx, eax); // ebx: result Label skip_allocation; switch (mode_) { case OVERWRITE_LEFT: @@ -2007,7 +2121,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { // Store the result in the HeapNumber and return. 
if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); + __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), ebx); @@ -2117,10 +2231,10 @@ void BinaryOpStub::GenerateHeapResultAllocation( __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); // Now edx can be overwritten losing one of the arguments as we are // now done and will not need it any more. - __ mov(edx, Operand(ebx)); + __ mov(edx, ebx); __ bind(&skip_allocation); // Use object in edx as a result holder - __ mov(eax, Operand(edx)); + __ mov(eax, edx); break; } case OVERWRITE_RIGHT: @@ -2178,7 +2292,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Then load the low and high words of the double into ebx, edx. STATIC_ASSERT(kSmiTagSize == 1); __ sar(eax, 1); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ mov(Operand(esp, 0), eax); __ fild_s(Operand(esp, 0)); __ fst_d(Operand(esp, 0)); @@ -2189,7 +2303,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Check if input is a HeapNumber. __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); + __ cmp(ebx, Immediate(factory->heap_number_map())); __ j(not_equal, &runtime_call); // Input is a HeapNumber. Push it on the FPU stack and load its // low and high words into ebx, edx. @@ -2201,12 +2315,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { } else { // UNTAGGED. if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatures::Scope sse4_scope(SSE4_1); - __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx. + __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. } else { __ pshufd(xmm0, xmm1, 0x1); - __ movd(Operand(edx), xmm0); + __ movd(edx, xmm0); } - __ movd(Operand(ebx), xmm1); + __ movd(ebx, xmm1); } // ST[0] or xmm1 == double value @@ -2215,15 +2329,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Compute hash (the shifts are arithmetic): // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); __ mov(ecx, ebx); - __ xor_(ecx, Operand(edx)); + __ xor_(ecx, edx); __ mov(eax, ecx); __ sar(eax, 16); - __ xor_(ecx, Operand(eax)); + __ xor_(ecx, eax); __ mov(eax, ecx); __ sar(eax, 8); - __ xor_(ecx, Operand(eax)); + __ xor_(ecx, eax); ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); - __ and_(Operand(ecx), + __ and_(ecx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); // ST[0] or xmm1 == double value. @@ -2238,7 +2352,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(eax, cache_array_index)); // Eax points to the cache for the type type_. // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &runtime_call_clear_stack); #ifdef DEBUG // Check that the layout of cache elements match expectations. @@ -2281,10 +2395,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); } else { // UNTAGGED. 
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); - __ sub(Operand(esp), Immediate(kDoubleSize)); + __ sub(esp, Immediate(kDoubleSize)); __ movdbl(Operand(esp, 0), xmm1); __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); + __ add(esp, Immediate(kDoubleSize)); } GenerateOperation(masm); __ mov(Operand(ecx, 0), ebx); @@ -2299,20 +2413,21 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Skip cache and return answer directly, only in untagged case. __ bind(&skip_cache); - __ sub(Operand(esp), Immediate(kDoubleSize)); + __ sub(esp, Immediate(kDoubleSize)); __ movdbl(Operand(esp, 0), xmm1); __ fld_d(Operand(esp, 0)); GenerateOperation(masm); __ fstp_d(Operand(esp, 0)); __ movdbl(xmm1, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); + __ add(esp, Immediate(kDoubleSize)); // We return the value in xmm1 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - // Allocate an unused object bigger than a HeapNumber. - __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Allocate an unused object bigger than a HeapNumber. + __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } @@ -2329,10 +2444,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&runtime_call); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); - __ EnterInternalFrame(); - __ push(eax); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(eax); + __ CallRuntime(RuntimeFunction(), 1); + } __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2364,13 +2480,13 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { // If argument is outside the range -2^63..2^63, fsin/cos doesn't // work. We must reduce it to the appropriate range. __ mov(edi, edx); - __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. + __ and_(edi, Immediate(0x7ff00000)); // Exponent only. int supported_exponent_limit = (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; - __ cmp(Operand(edi), Immediate(supported_exponent_limit)); + __ cmp(edi, Immediate(supported_exponent_limit)); __ j(below, &in_range, Label::kNear); // Check for infinity and NaN. Both return NaN for sin. - __ cmp(Operand(edi), Immediate(0x7ff00000)); + __ cmp(edi, Immediate(0x7ff00000)); Label non_nan_result; __ j(not_equal, &non_nan_result, Label::kNear); // Input is +/-Infinity or NaN. Result is NaN. @@ -2379,7 +2495,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { __ push(Immediate(0x7ff80000)); __ push(Immediate(0)); __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ add(esp, Immediate(2 * kPointerSize)); __ jmp(&done, Label::kNear); __ bind(&non_nan_result); @@ -2395,7 +2511,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { __ fwait(); __ fnstsw_ax(); // Clear if Illegal Operand or Zero Division exceptions are set. 
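The cache probe in the TranscendentalCacheStub hunks above hashes the two 32-bit halves of the input double exactly as the comment spells out, and relies on the cache size being a power of two so the final mask selects a bucket. A direct sketch (the cache size of 512 is an illustrative guess; only the power-of-two property matters):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Bucket index for a double, following h = (low ^ high); h ^= h >> 16;
// h ^= h >> 8; h &= cache_size - 1, with arithmetic shifts as in the
// sar-based assembly.
int CacheIndex(double input, int cache_size) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  int32_t low = static_cast<int32_t>(bits);
  int32_t high = static_cast<int32_t>(bits >> 32);
  int32_t h = low ^ high;
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (cache_size - 1);
}

int main() { std::printf("%d\n", CacheIndex(3.14159, 512)); return 0; }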
- __ test(Operand(eax), Immediate(5)); + __ test(eax, Immediate(5)); __ j(zero, &no_exceptions, Label::kNear); __ fnclex(); __ bind(&no_exceptions); @@ -2408,7 +2524,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { __ fprem1(); __ fwait(); __ fnstsw_ax(); - __ test(Operand(eax), Immediate(0x400 /* C2 */)); + __ test(eax, Immediate(0x400 /* C2 */)); // If C2 is set, computation only has partial result. Loop to // continue computation. __ j(not_zero, &partial_remainder_loop); @@ -2541,13 +2657,13 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { __ bind(&load_smi_edx); __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ SmiTag(edx); // Retag smi for heap number overwriting test. __ jmp(&load_eax); __ bind(&load_smi_eax); __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); + __ cvtsi2sd(xmm1, eax); __ SmiTag(eax); // Retag smi for heap number overwriting test. __ bind(&done); @@ -2571,12 +2687,12 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, __ jmp(not_numbers); // Argument in eax is not a number. __ bind(&load_smi_edx); __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ SmiTag(edx); // Retag smi for heap number overwriting test. __ jmp(&load_eax); __ bind(&load_smi_eax); __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); + __ cvtsi2sd(xmm1, eax); __ SmiTag(eax); // Retag smi for heap number overwriting test. __ jmp(&done, Label::kNear); __ bind(&load_float_eax); @@ -2592,11 +2708,11 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, __ mov(scratch, left); ASSERT(!scratch.is(right)); // We're about to clobber scratch. __ SmiUntag(scratch); - __ cvtsi2sd(xmm0, Operand(scratch)); + __ cvtsi2sd(xmm0, scratch); __ mov(scratch, right); __ SmiUntag(scratch); - __ cvtsi2sd(xmm1, Operand(scratch)); + __ cvtsi2sd(xmm1, scratch); } @@ -2604,12 +2720,12 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, Label* non_int32, Register scratch) { __ cvttsd2si(scratch, Operand(xmm0)); - __ cvtsi2sd(xmm2, Operand(scratch)); + __ cvtsi2sd(xmm2, scratch); __ ucomisd(xmm0, xmm2); __ j(not_zero, non_int32); __ j(carry, non_int32); __ cvttsd2si(scratch, Operand(xmm1)); - __ cvtsi2sd(xmm2, Operand(scratch)); + __ cvtsi2sd(xmm2, scratch); __ ucomisd(xmm1, xmm2); __ j(not_zero, non_int32); __ j(carry, non_int32); @@ -2717,7 +2833,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Save 1 in xmm3 - we need this several times later on. __ mov(ecx, Immediate(1)); - __ cvtsi2sd(xmm3, Operand(ecx)); + __ cvtsi2sd(xmm3, ecx); Label exponent_nonsmi; Label base_nonsmi; @@ -2728,7 +2844,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Optimized version when both exponent and base are smis. Label powi; __ SmiUntag(edx); - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ jmp(&powi); // exponent is smi and base is a heapnumber. __ bind(&base_nonsmi); @@ -2770,11 +2886,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { // base has the original value of the exponent - if the exponent is // negative return 1/result. - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(positive, &allocate_return); // Special case if xmm1 has reached infinity. 
__ mov(ecx, Immediate(0x7FB00000)); - __ movd(xmm0, Operand(ecx)); + __ movd(xmm0, ecx); __ cvtss2sd(xmm0, xmm0); __ ucomisd(xmm0, xmm1); __ j(equal, &call_runtime); @@ -2797,7 +2913,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { Label handle_special_cases; __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear); __ SmiUntag(edx); - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ jmp(&handle_special_cases, Label::kNear); __ bind(&base_not_smi); @@ -2806,7 +2922,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ j(not_equal, &call_runtime); __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); __ and_(ecx, HeapNumber::kExponentMask); - __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask)); + __ cmp(ecx, Immediate(HeapNumber::kExponentMask)); // base is NaN or +/-Infinity __ j(greater_equal, &call_runtime); __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); @@ -2817,7 +2933,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Test for -0.5. // Load xmm2 with -0.5. __ mov(ecx, Immediate(0xBF000000)); - __ movd(xmm2, Operand(ecx)); + __ movd(xmm2, ecx); __ cvtss2sd(xmm2, xmm2); // xmm2 now has -0.5. __ ucomisd(xmm2, xmm1); @@ -2873,13 +2989,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { Label adaptor; __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &adaptor, Label::kNear); // Check index against formal parameters count limit passed in // through register eax. Use unsigned comparison to get negative // check for free. - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(above_equal, &slow, Label::kNear); // Read the argument from the stack and return it. @@ -2895,7 +3011,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // comparison to get negative check for free. __ bind(&adaptor); __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmp(edx, Operand(ecx)); + __ cmp(edx, ecx); __ j(above_equal, &slow, Label::kNear); // Read the argument from the stack and return it. @@ -2926,7 +3042,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { Label runtime; __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(not_equal, &runtime, Label::kNear); // Patch the arguments.length and the parameters pointer. @@ -2957,7 +3073,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Label adaptor_frame, try_allocate; __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &adaptor_frame, Label::kNear); // No adaptor, parameter count = argument count. @@ -2976,7 +3092,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { // esp[4] = parameter count (tagged) // esp[8] = address of receiver argument // Compute the mapped parameter count = min(ebx, ecx) in ebx. 
- __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(less_equal, &try_allocate, Label::kNear); __ mov(ebx, ecx); @@ -2990,7 +3106,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { const int kParameterMapHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize; Label no_parameter_map; - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(zero, &no_parameter_map, Label::kNear); __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize)); __ bind(&no_parameter_map); @@ -2999,7 +3115,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); // 3. Arguments object. - __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize)); + __ add(ebx, Immediate(Heap::kArgumentsObjectSize)); // Do the allocation of all three objects in one go. __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT); @@ -3014,7 +3130,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); __ mov(ebx, Operand(esp, 0 * kPointerSize)); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(not_zero, &has_mapped_parameters, Label::kNear); __ mov(edi, Operand(edi, Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX))); @@ -3069,7 +3185,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { // Initialize parameter map. If there are no mapped arguments, we're done. Label skip_parameter_map; - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(zero, &skip_parameter_map); __ mov(FieldOperand(edi, FixedArray::kMapOffset), @@ -3093,7 +3209,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ mov(eax, Operand(esp, 2 * kPointerSize)); __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); __ add(ebx, Operand(esp, 4 * kPointerSize)); - __ sub(ebx, Operand(eax)); + __ sub(ebx, eax); __ mov(ecx, FACTORY->the_hole_value()); __ mov(edx, edi); __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize)); @@ -3110,12 +3226,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ jmp(¶meters_test, Label::kNear); __ bind(¶meters_loop); - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx); __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx); - __ add(Operand(ebx), Immediate(Smi::FromInt(1))); + __ add(ebx, Immediate(Smi::FromInt(1))); __ bind(¶meters_test); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, ¶meters_loop, Label::kNear); __ pop(ecx); @@ -3135,18 +3251,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Label arguments_loop, arguments_test; __ mov(ebx, Operand(esp, 1 * kPointerSize)); __ mov(edx, Operand(esp, 4 * kPointerSize)); - __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling? - __ sub(Operand(edx), ebx); + __ sub(edx, ebx); // Is there a smarter way to do negative scaling? 
+ __ sub(edx, ebx); __ jmp(&arguments_test, Label::kNear); __ bind(&arguments_loop); - __ sub(Operand(edx), Immediate(kPointerSize)); + __ sub(edx, Immediate(kPointerSize)); __ mov(eax, Operand(edx, 0)); __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax); - __ add(Operand(ebx), Immediate(Smi::FromInt(1))); + __ add(ebx, Immediate(Smi::FromInt(1))); __ bind(&arguments_test); - __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(less, &arguments_loop, Label::kNear); // Restore. @@ -3174,7 +3290,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { Label adaptor_frame, try_allocate, runtime; __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &adaptor_frame, Label::kNear); // Get the length from the frame. @@ -3193,11 +3309,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // the arguments object and the elements array. Label add_arguments_object; __ bind(&try_allocate); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &add_arguments_object, Label::kNear); __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); __ bind(&add_arguments_object); - __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict)); + __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict)); // Do the allocation of both objects in one go. __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); @@ -3224,7 +3340,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // If there are no actual arguments, we're done. Label done; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &done, Label::kNear); // Get the parameters pointer from the stack. @@ -3246,8 +3362,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ bind(&loop); __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); - __ add(Operand(edi), Immediate(kPointerSize)); - __ sub(Operand(edx), Immediate(kPointerSize)); + __ add(edi, Immediate(kPointerSize)); + __ sub(edx, Immediate(kPointerSize)); __ dec(ecx); __ j(not_zero, &loop); @@ -3268,10 +3384,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } // Stack frame on entry. // esp[0]: return address @@ -3294,7 +3406,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { ExternalReference address_of_regexp_stack_memory_size = ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(zero, &runtime); // Check that the first argument is a JSRegExp object. @@ -3315,7 +3427,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // ecx: RegExp data (FixedArray) // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. 
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); - __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); + __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); __ j(not_equal, &runtime); // ecx: RegExp data (FixedArray) @@ -3325,7 +3437,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // uses the asumption that smis are 2 * their untagged value. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(Operand(edx), Immediate(2)); // edx was a smi. + __ add(edx, Immediate(2)); // edx was a smi. // Check that the static offsets vector buffer is large enough. __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); __ j(above, &runtime); @@ -3347,7 +3459,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // string length. A negative value will be greater (unsigned comparison). __ mov(eax, Operand(esp, kPreviousIndexOffset)); __ JumpIfNotSmi(eax, &runtime); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(above_equal, &runtime); // ecx: RegExp data (FixedArray) @@ -3367,8 +3479,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // additional information. __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); __ SmiUntag(eax); - __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmp(edx, Operand(eax)); + __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmp(edx, eax); __ j(greater, &runtime); // Reset offset for possibly sliced string. @@ -3385,8 +3497,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); __ j(zero, &seq_two_byte_string, Label::kNear); // Any other flat string must be a flat ascii string. - __ and_(Operand(ebx), - Immediate(kIsNotStringMask | kStringRepresentationMask)); + __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask)); __ j(zero, &seq_ascii_string, Label::kNear); // Check for flat cons string or sliced string. @@ -3398,7 +3509,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Label cons_string, check_encoding; STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); - __ cmp(Operand(ebx), Immediate(kExternalStringTag)); + __ cmp(ebx, Immediate(kExternalStringTag)); __ j(less, &cons_string); __ j(equal, &runtime); @@ -3504,14 +3615,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Prepare start and end index of the input. // Load the length from the original sliced string if that is the case. __ mov(esi, FieldOperand(esi, String::kLengthOffset)); - __ add(esi, Operand(edi)); // Calculate input end wrt offset. + __ add(esi, edi); // Calculate input end wrt offset. __ SmiUntag(edi); - __ add(ebx, Operand(edi)); // Calculate input start wrt offset. + __ add(ebx, edi); // Calculate input start wrt offset. // ebx: start index of the input string // esi: end index of the input string Label setup_two_byte, setup_rest; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &setup_two_byte, Label::kNear); __ SmiUntag(esi); __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize)); @@ -3531,8 +3642,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&setup_rest); // Locate the code entry and call it. - __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ call(Operand(edx)); + __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ call(edx); // Drop arguments and come back to JS mode. 
__ LeaveApiExitFrame(); @@ -3553,11 +3664,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // TODO(592): Rerunning the RegExp to get the stack overflow exception. ExternalReference pending_exception(Isolate::kPendingExceptionAddress, masm->isolate()); - __ mov(edx, - Operand::StaticVariable(ExternalReference::the_hole_value_location( - masm->isolate()))); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); __ mov(eax, Operand::StaticVariable(pending_exception)); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(equal, &runtime); // For exception, throw the exception again. @@ -3578,7 +3687,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&failure); // For failure to match, return null. - __ mov(Operand(eax), factory->null_value()); + __ mov(eax, factory->null_value()); __ ret(4 * kPointerSize); // Load RegExp data. @@ -3589,7 +3698,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Calculate number of capture registers (number_of_captures + 1) * 2. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(Operand(edx), Immediate(2)); // edx was a smi. + __ add(edx, Immediate(2)); // edx was a smi. // edx: Number of capture registers // Load last_match_info which is still known to be a fast case JSArray. @@ -3605,12 +3714,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Store last subject and last input. __ mov(eax, Operand(esp, kSubjectOffset)); __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); - __ mov(ecx, ebx); - __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi); + __ RecordWriteField(ebx, + RegExpImpl::kLastSubjectOffset, + eax, + edi, + kDontSaveFPRegs); __ mov(eax, Operand(esp, kSubjectOffset)); __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); - __ mov(ecx, ebx); - __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi); + __ RecordWriteField(ebx, + RegExpImpl::kLastInputOffset, + eax, + edi, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -3624,7 +3739,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Capture register counter starts from number of capture registers and // counts down until wraping after zero. __ bind(&next_capture); - __ sub(Operand(edx), Immediate(1)); + __ sub(edx, Immediate(1)); __ j(negative, &done, Label::kNear); // Read the value from the static offsets vector buffer. __ mov(edi, Operand(ecx, edx, times_int_size, 0)); @@ -3655,7 +3770,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { Label done; __ mov(ebx, Operand(esp, kPointerSize * 3)); __ JumpIfNotSmi(ebx, &slowcase); - __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength))); + __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength))); __ j(above, &slowcase); // Smi-tagging is equivalent to multiplying by 2. STATIC_ASSERT(kSmiTag == 0); @@ -3715,10 +3830,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // ebx: Start of elements in FixedArray. // edx: the hole. Label loop; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ bind(&loop); __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero. 
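Both RegExp hunks above compute the number of capture registers, (number_of_captures + 1) * 2, directly from the smi-encoded capture count: the smi already holds 2 * n, so the single "add edx, Immediate(2)" produces 2 * (n + 1) without untagging first. Spelled out as plain arithmetic:

#include <cassert>

int main() {
  int number_of_captures = 3;                 // n
  int smi_encoded = number_of_captures << 1;  // edx on entry: 2 * n
  int capture_registers = smi_encoded + 2;    // "add edx, Immediate(2)"
  assert(capture_registers == (number_of_captures + 1) * 2);
  return 0;
}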
- __ sub(Operand(ecx), Immediate(1)); + __ sub(ecx, Immediate(1)); __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx); __ jmp(&loop); @@ -3752,7 +3867,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, // contains two elements (number and string) for each cache entry. __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. - __ sub(Operand(mask), Immediate(1)); // Make mask. + __ sub(mask, Immediate(1)); // Make mask. // Calculate the entry in the number string cache. The hash value in the // number string cache for smis is just the smi value, and the hash for @@ -3778,7 +3893,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); // Object is heap number and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); Register index = scratch; Register probe = mask; __ mov(probe, @@ -3804,7 +3919,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ bind(&smi_hash_calculated); // Object is smi and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); Register index = scratch; // Check if the entry is the smi we are looking for. __ cmp(object, @@ -3856,10 +3971,10 @@ void CompareStub::Generate(MacroAssembler* masm) { // Compare two smis if required. if (include_smi_compare_) { Label non_smi, smi_done; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); - __ sub(edx, Operand(eax)); // Return on the result of the subtraction. + __ sub(edx, eax); // Return on the result of the subtraction. __ j(no_overflow, &smi_done, Label::kNear); __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. __ bind(&smi_done); @@ -3867,8 +3982,8 @@ void CompareStub::Generate(MacroAssembler* masm) { __ ret(0); __ bind(&non_smi); } else if (FLAG_debug_code) { - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ test(ecx, Immediate(kSmiTagMask)); __ Assert(not_zero, "Unexpected smi operands."); } @@ -3880,7 +3995,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // for NaN and undefined. { Label not_identical; - __ cmp(eax, Operand(edx)); + __ cmp(eax, edx); __ j(not_equal, ¬_identical); if (cc_ != equal) { @@ -3929,7 +4044,7 @@ void CompareStub::Generate(MacroAssembler* masm) { __ Set(eax, Immediate(0)); // Shift value and mask so kQuietNaNHighBitsMask applies to topmost // bits. - __ add(edx, Operand(edx)); + __ add(edx, edx); __ cmp(edx, kQuietNaNHighBitsMask << 1); if (cc_ == equal) { STATIC_ASSERT(EQUAL != 1); @@ -3963,19 +4078,19 @@ void CompareStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); __ mov(ecx, Immediate(kSmiTagMask)); - __ and_(ecx, Operand(eax)); - __ test(ecx, Operand(edx)); + __ and_(ecx, eax); + __ test(ecx, edx); __ j(not_zero, ¬_smis, Label::kNear); // One operand is a smi. // Check whether the non-smi is a heap number. STATIC_ASSERT(kSmiTagMask == 1); // ecx still holds eax & kSmiTag, which is either zero or one. 
- __ sub(Operand(ecx), Immediate(0x01)); + __ sub(ecx, Immediate(0x01)); __ mov(ebx, edx); - __ xor_(ebx, Operand(eax)); - __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx. - __ xor_(ebx, Operand(eax)); + __ xor_(ebx, eax); + __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx. + __ xor_(ebx, eax); // if eax was smi, ebx is now edx, else eax. // Check if the non-smi operand is a heap number. @@ -4037,9 +4152,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Return a result of -1, 0, or 1, based on EFLAGS. __ mov(eax, 0); // equal __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); + __ cmov(above, eax, ecx); __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); + __ cmov(below, eax, ecx); __ ret(0); } else { FloatingPointHelper::CheckFloatOperands( @@ -4198,25 +4313,49 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void CallFunctionStub::FinishCode(Code* code) { + code->set_has_function_cache(RecordCallTarget()); +} + + +void CallFunctionStub::Clear(Heap* heap, Address address) { + ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte); + // 1 ~ size of the test eax opcode. + Object* cell = Memory::Object_at(address + kPointerSize + 1); + // Low-level because clearing happens during GC. + reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value( + RawUninitializedSentinel(heap)); +} + + +Object* CallFunctionStub::GetCachedValue(Address address) { + ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte); + // 1 ~ size of the test eax opcode. + Object* cell = Memory::Object_at(address + kPointerSize + 1); + return JSGlobalPropertyCell::cast(cell)->value(); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { + Isolate* isolate = masm->isolate(); Label slow, non_function; // The receiver might implicitly be the global object. This is // indicated by passing the hole as the receiver to the call // function stub. if (ReceiverMightBeImplicit()) { - Label call; + Label receiver_ok; // Get the receiver from the stack. // +1 ~ return address __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); // Call as function is indicated with the hole. - __ cmp(eax, masm->isolate()->factory()->the_hole_value()); - __ j(not_equal, &call, Label::kNear); + __ cmp(eax, isolate->factory()->the_hole_value()); + __ j(not_equal, &receiver_ok, Label::kNear); // Patch the receiver on the stack with the global receiver object. __ mov(ebx, GlobalObjectOperand()); __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx); - __ bind(&call); + __ bind(&receiver_ok); } // Get the function to call from the stack. @@ -4229,12 +4368,53 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); __ j(not_equal, &slow); + if (RecordCallTarget()) { + // Cache the called function in a global property cell in the + // instruction stream after the call. Cache states are uninitialized, + // monomorphic (indicated by a JSFunction), and megamorphic. + Label initialize, call; + // Load the cache cell address into ebx and the cache state into ecx. + __ mov(ebx, Operand(esp, 0)); // Return address. + __ mov(ebx, Operand(ebx, 1)); // 1 ~ sizeof 'test eax' opcode in bytes. + __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. 
+ __ cmp(ecx, edi); + __ j(equal, &call, Label::kNear); + __ cmp(ecx, Immediate(MegamorphicSentinel(isolate))); + __ j(equal, &call, Label::kNear); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ cmp(ecx, Immediate(UninitializedSentinel(isolate))); + __ j(equal, &initialize, Label::kNear); + // MegamorphicSentinel is a root so no write-barrier is needed. + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(MegamorphicSentinel(isolate))); + __ jmp(&call, Label::kNear); + + // An uninitialized cache is patched with the function. + __ bind(&initialize); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi); + __ mov(ecx, edi); + __ RecordWriteField(ebx, + JSGlobalPropertyCell::kValueOffset, + ecx, + edx, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, // Cells are rescanned. + OMIT_SMI_CHECK); + + __ bind(&call); + } + // Fast-case: Just invoke the function. ParameterCount actual(argc_); if (ReceiverMightBeImplicit()) { Label call_as_function; - __ cmp(eax, masm->isolate()->factory()->the_hole_value()); + __ cmp(eax, isolate->factory()->the_hole_value()); __ j(equal, &call_as_function); __ InvokeFunction(edi, actual, @@ -4251,6 +4431,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Slow-case: Non-function called. __ bind(&slow); + if (RecordCallTarget()) { + // If there is a call target cache, mark it megamorphic in the + // non-function case. + __ mov(ebx, Operand(esp, 0)); + __ mov(ebx, Operand(ebx, 1)); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(MegamorphicSentinel(isolate))); + } // Check for function proxy. __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); __ j(not_equal, &non_function); @@ -4262,8 +4450,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ SetCallKind(ecx, CALL_AS_FUNCTION); __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); { - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); __ jmp(adaptor, RelocInfo::CODE_TARGET); } @@ -4275,8 +4462,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ Set(ebx, Immediate(0)); __ SetCallKind(ecx, CALL_AS_METHOD); __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); __ jmp(adaptor, RelocInfo::CODE_TARGET); } @@ -4286,6 +4472,35 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { + return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && + result_size_ == 1; +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + // It is important that the store buffer overflow stubs are generated first. 
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { + CEntryStub save_doubles(1, kSaveFPRegs); + Handle<Code> code = save_doubles.GetCode(); + code->set_is_pregenerated(true); + code->GetIsolate()->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + Handle<Code> code = stub.GetCode(); + code->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(eax); } @@ -4332,7 +4547,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. __ mov(Operand(esp, 2 * kPointerSize), Immediate(ExternalReference::isolate_address())); - __ call(Operand(ebx)); + __ call(ebx); // Result is in eax or edx:eax - do not destroy these registers! if (always_allocate_scope) { @@ -4364,8 +4579,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // should have returned some failure value. if (FLAG_debug_code) { __ push(edx); - __ mov(edx, Operand::StaticVariable( - ExternalReference::the_hole_value_location(masm->isolate()))); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); Label okay; __ cmp(edx, Operand::StaticVariable(pending_exception_address)); // Cannot use check here as it attempts to generate call into runtime. @@ -4376,7 +4590,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles_); + __ LeaveExitFrame(save_doubles_ == kSaveFPRegs); __ ret(0); // Handling of failure. @@ -4393,10 +4607,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ j(equal, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - ExternalReference the_hole_location = - ExternalReference::the_hole_value_location(masm->isolate()); __ mov(eax, Operand::StaticVariable(pending_exception_address)); - __ mov(edx, Operand::StaticVariable(the_hole_location)); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); __ mov(Operand::StaticVariable(pending_exception_address), edx); // Special handling of termination exceptions which are uncatchable @@ -4431,7 +4643,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { // a garbage collection and retrying the builtin (twice). // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(save_doubles_); + __ EnterExitFrame(save_doubles_ == kSaveFPRegs); // eax: result parameter for PerformGC, if any (setup below) // ebx: pointer to builtin function (C callee-saved) @@ -4487,7 +4699,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Setup frame. __ push(ebp); - __ mov(ebp, Operand(esp)); + __ mov(ebp, esp); // Push marker in two places. int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; @@ -4531,9 +4743,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); // Clear any pending exceptions. - ExternalReference the_hole_location = - ExternalReference::the_hole_value_location(masm->isolate()); - __ mov(edx, Operand::StaticVariable(the_hole_location)); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); __ mov(Operand::StaticVariable(pending_exception), edx); // Fake a receiver (NULL). 
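The CallFunctionStub changes above add call-target caching: when RecordCallTarget() is true, the stub loads a JSGlobalPropertyCell whose address is embedded as the immediate of the 'test eax' instruction placed right after the call, and moves it through three states. A minimal pseudo-C++ sketch of that transition logic (an illustration of the policy, not the generated code itself):
  // Sketch of the cache update performed before the call is dispatched.
  Object* state = cell->value();
  if (state == callee || state == MegamorphicSentinel(isolate)) {
    // Monomorphic hit, or already megamorphic: leave the cell untouched.
  } else if (state == UninitializedSentinel(isolate)) {
    cell->set_value(callee);  // First observed target: go monomorphic.
    // Needs a write barrier, since a heap object is stored into the cell.
  } else {
    // A different target than the cached one: give up and go megamorphic.
    cell->set_value(MegamorphicSentinel(isolate));  // Sentinel is a root, no barrier.
  }
The slow (non-function) path likewise forces the cell to the megamorphic sentinel so a bogus target is never cached.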
@@ -4555,7 +4765,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { } __ mov(edx, Operand(edx, 0)); // deref address __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); - __ call(Operand(edx)); + __ call(edx); // Unlink this frame from the handler chain. __ PopTryHandler(); @@ -4563,8 +4773,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ bind(&exit); // Check if the current stack frame is marked as the outermost JS frame. __ pop(ebx); - __ cmp(Operand(ebx), - Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); __ j(not_equal, &not_outermost_js_2); __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); __ bind(&not_outermost_js_2); @@ -4578,7 +4787,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ pop(ebx); __ pop(esi); __ pop(edi); - __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers + __ add(esp, Immediate(2 * kPointerSize)); // remove markers // Restore frame pointer and return. __ pop(ebp); @@ -4694,10 +4903,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset)); Label loop, is_instance, is_not_instance; __ bind(&loop); - __ cmp(scratch, Operand(prototype)); + __ cmp(scratch, prototype); __ j(equal, &is_instance, Label::kNear); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(scratch), Immediate(factory->null_value())); + __ cmp(scratch, Immediate(factory->null_value())); __ j(equal, &is_not_instance, Label::kNear); __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); @@ -4788,13 +4997,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { // Call the builtin and convert 0/1 to true/false. - __ EnterInternalFrame(); - __ push(object); - __ push(function); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(object); + __ push(function); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } Label true_value, done; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &true_value, Label::kNear); __ mov(eax, factory->false_value()); __ jmp(&done, Label::kNear); @@ -4905,22 +5115,24 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Immediate(masm->isolate()->factory()->empty_string())); __ j(not_equal, &call_runtime_); // Get the first of the two strings and load its instance type. - __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); + __ mov(result_, FieldOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string, Label::kNear); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset)); - __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset)); + __ mov(result_, FieldOperand(object_, SlicedString::kParentOffset)); // Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string); - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ mov(result_, FieldOperand(result_, HeapObject::kMapOffset)); __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSeqStringTag == 0); __ test(result_, Immediate(kStringRepresentationMask)); __ j(not_zero, &call_runtime_); - __ jmp(&flat_string, Label::kNear); + // Actually fetch the parent string if it is confirmed to be sequential. + STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset); + __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset)); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -5110,7 +5322,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { Label second_not_zero_length, both_not_zero_length; __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); STATIC_ASSERT(kSmiTag == 0); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, &second_not_zero_length, Label::kNear); // Second string is empty, result is first string which is already in eax. Counters* counters = masm->isolate()->counters(); @@ -5119,7 +5331,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&second_not_zero_length); __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); STATIC_ASSERT(kSmiTag == 0); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(not_zero, &both_not_zero_length, Label::kNear); // First string is empty, result is second string which is in edx. __ mov(eax, edx); @@ -5134,13 +5346,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Look at the length of the result of adding the two strings. Label string_add_flat_result, longer_than_two; __ bind(&both_not_zero_length); - __ add(ebx, Operand(ecx)); + __ add(ebx, ecx); STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); // Handle exceptionally long strings in the runtime system. __ j(overflow, &string_add_runtime); // Use the symbol table when adding two one character strings, as it // helps later optimizations to return a symbol here. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); + __ cmp(ebx, Immediate(Smi::FromInt(2))); __ j(not_equal, &longer_than_two); // Check that both strings are non-external ascii strings. @@ -5177,7 +5389,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { &string_add_runtime); // Pack both characters in ebx. __ shl(ecx, kBitsPerByte); - __ or_(ebx, Operand(ecx)); + __ or_(ebx, ecx); // Set the characters in the new string. __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); __ IncrementCounter(counters->string_add_native(), 1); @@ -5185,7 +5397,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&longer_than_two); // Check if resulting string will be flat. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); + __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength))); __ j(below, &string_add_flat_result); // If result is not supposed to be flat allocate a cons string object. 
If both @@ -5195,7 +5407,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ and_(ecx, Operand(edi)); + __ and_(ecx, edi); STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ test(ecx, Immediate(kStringEncodingMask)); @@ -5223,7 +5435,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ j(not_zero, &ascii_data); __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ xor_(edi, Operand(ecx)); + __ xor_(edi, ecx); STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); @@ -5271,12 +5483,12 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Load first argument and locate first character. __ mov(edx, Operand(esp, 2 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: first character of result // edx: first char of first argument @@ -5286,7 +5498,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(edx, Operand(esp, 1 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: next character of result // edx: first char of second argument @@ -5310,13 +5522,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(Operand(ecx), + __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Load first argument and locate first character. 
__ mov(edx, Operand(esp, 2 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), + __ add(edx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: first character of result @@ -5327,7 +5539,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(edx, Operand(esp, 1 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: next character of result // edx: first char of second argument @@ -5403,15 +5615,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, if (ascii) { __ mov_b(scratch, Operand(src, 0)); __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); + __ add(src, Immediate(1)); + __ add(dest, Immediate(1)); } else { __ mov_w(scratch, Operand(src, 0)); __ mov_w(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(2)); - __ add(Operand(dest), Immediate(2)); + __ add(src, Immediate(2)); + __ add(dest, Immediate(2)); } - __ sub(Operand(count), Immediate(1)); + __ sub(count, Immediate(1)); __ j(not_zero, &loop); } @@ -5434,7 +5646,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, // Nothing to do for zero characters. Label done; - __ test(count, Operand(count)); + __ test(count, count); __ j(zero, &done); // Make count the number of bytes to copy. @@ -5459,7 +5671,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, // Check if there are more bytes to copy. __ bind(&last_bytes); - __ test(count, Operand(count)); + __ test(count, count); __ j(zero, &done); // Copy remaining characters. @@ -5467,9 +5679,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, __ bind(&loop); __ mov_b(scratch, Operand(src, 0)); __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); - __ sub(Operand(count), Immediate(1)); + __ add(src, Immediate(1)); + __ add(dest, Immediate(1)); + __ sub(count, Immediate(1)); __ j(not_zero, &loop); __ bind(&done); @@ -5491,12 +5703,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // different hash algorithm. Don't try to look for these in the symbol table. Label not_array_index; __ mov(scratch, c1); - __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); - __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); + __ sub(scratch, Immediate(static_cast<int>('0'))); + __ cmp(scratch, Immediate(static_cast<int>('9' - '0'))); __ j(above, &not_array_index, Label::kNear); __ mov(scratch, c2); - __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); - __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); + __ sub(scratch, Immediate(static_cast<int>('0'))); + __ cmp(scratch, Immediate(static_cast<int>('9' - '0'))); __ j(below_equal, not_probed); __ bind(&not_array_index); @@ -5509,7 +5721,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Collect the two characters in a register. Register chars = c1; __ shl(c2, kBitsPerByte); - __ or_(chars, Operand(c2)); + __ or_(chars, c2); // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string.
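The array-index filter above relies on the usual unsigned range-check idiom: after subtracting '0', a single unsigned comparison against '9' - '0' covers both bounds, because characters below '0' wrap around to large unsigned values. Roughly, in C++ (a sketch of the same test):
  // One unsigned compare replaces the two-sided '0' <= c && c <= '9' test;
  // this mirrors the sub/cmp/j(above) sequence emitted above.
  static inline bool IsAsciiDigit(uint32_t c) {
    return (c - '0') <= static_cast<uint32_t>('9' - '0');
  }
Two-character strings in which both characters are digits hash like array indices, so the probe gives up on them instead of searching the symbol table.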
@@ -5526,7 +5738,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register mask = scratch2; __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); __ SmiUntag(mask); - __ sub(Operand(mask), Immediate(1)); + __ sub(mask, Immediate(1)); // Registers // chars: two character string, char 1 in byte 0 and char 2 in byte 1. @@ -5543,9 +5755,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Calculate entry in symbol table. __ mov(scratch, hash); if (i > 0) { - __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); + __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i))); } - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); // Load the entry from the symbol table. Register candidate = scratch; // Scratch register contains candidate. @@ -5582,7 +5794,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); __ and_(temp, 0x0000ffff); - __ cmp(chars, Operand(temp)); + __ cmp(chars, temp); __ j(equal, &found_in_symbol_table); __ bind(&next_probe_pop_mask[i]); __ pop(mask); @@ -5609,11 +5821,11 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, // hash = character + (character << 10); __ mov(hash, character); __ shl(hash, 10); - __ add(hash, Operand(character)); + __ add(hash, character); // hash ^= hash >> 6; __ mov(scratch, hash); __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); } @@ -5622,15 +5834,15 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, Register character, Register scratch) { // hash += character; - __ add(hash, Operand(character)); + __ add(hash, character); // hash += hash << 10; __ mov(scratch, hash); __ shl(scratch, 10); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // hash ^= hash >> 6; __ mov(scratch, hash); __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); } @@ -5640,19 +5852,19 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm, // hash += hash << 3; __ mov(scratch, hash); __ shl(scratch, 3); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // hash ^= hash >> 11; __ mov(scratch, hash); __ sar(scratch, 11); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); // hash += hash << 15; __ mov(scratch, hash); __ shl(scratch, 15); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // if (hash == 0) hash = 27; Label hash_not_zero; - __ test(hash, Operand(hash)); + __ test(hash, hash); __ j(not_zero, &hash_not_zero, Label::kNear); __ mov(hash, Immediate(27)); __ bind(&hash_not_zero); @@ -5684,7 +5896,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ JumpIfNotSmi(ecx, &runtime); __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. __ JumpIfNotSmi(edx, &runtime); - __ sub(ecx, Operand(edx)); + __ sub(ecx, edx); __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); Label return_eax; __ j(equal, &return_eax); @@ -5816,13 +6028,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); - __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. 
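Read together, GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash implement the shift-add string hash spelled out in their inline comments above. A plain C++ rendering of the same steps (a sketch only: the generated code keeps the running value in a register and uses arithmetic right shifts, shown here with unsigned arithmetic):
  uint32_t StringHashSketch(const uint8_t* chars, int length) {
    uint32_t hash = 0;
    for (int i = 0; i < length; i++) {  // GenerateHashInit / GenerateHashAddCharacter
      hash += chars[i];
      hash += hash << 10;
      hash ^= hash >> 6;
    }
    hash += hash << 3;                  // GenerateHashGetHash
    hash ^= hash >> 11;
    hash += hash << 15;
    return hash == 0 ? 27 : hash;       // Zero is reserved, fall back to 27.
  }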
__ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from __ SmiUntag(ebx); - __ add(esi, Operand(ebx)); + __ add(esi, ebx); // eax: result string // ecx: result length @@ -5851,18 +6063,17 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); - __ add(Operand(edi), + __ add(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. __ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from // As from is a smi it is 2 times the value which matches the size of a two // byte character. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(esi, Operand(ebx)); + __ add(esi, ebx); // eax: result string // ecx: result length @@ -5902,7 +6113,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, Label compare_chars; __ bind(&check_zero_length); STATIC_ASSERT(kSmiTag == 0); - __ test(length, Operand(length)); + __ test(length, length); __ j(not_zero, &compare_chars, Label::kNear); __ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ ret(0); @@ -5937,14 +6148,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, __ j(less_equal, &left_shorter, Label::kNear); // Right string is shorter. Change scratch1 to be length of right string. - __ sub(scratch1, Operand(length_delta)); + __ sub(scratch1, length_delta); __ bind(&left_shorter); Register min_length = scratch1; // If either length is zero, just compare lengths. Label compare_lengths; - __ test(min_length, Operand(min_length)); + __ test(min_length, min_length); __ j(zero, &compare_lengths, Label::kNear); // Compare characters. @@ -5954,7 +6165,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, // Compare lengths - strings up to min-length are equal. __ bind(&compare_lengths); - __ test(length_delta, Operand(length_delta)); + __ test(length_delta, length_delta); __ j(not_zero, &result_not_equal, Label::kNear); // Result is EQUAL. @@ -6003,7 +6214,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( __ mov_b(scratch, Operand(left, index, times_1, 0)); __ cmpb(scratch, Operand(right, index, times_1, 0)); __ j(not_equal, chars_not_equal, chars_not_equal_near); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ j(not_zero, &loop); } @@ -6020,7 +6231,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(esp, 1 * kPointerSize)); // right Label not_same; - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(not_equal, &not_same, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -6036,7 +6247,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { // Compare flat ascii strings. // Drop arguments from the stack.
__ pop(ecx); - __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ add(esp, Immediate(2 * kPointerSize)); __ push(ecx); GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); @@ -6050,16 +6261,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void ICCompareStub::GenerateSmis(MacroAssembler* masm) { ASSERT(state_ == CompareIC::SMIS); Label miss; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ JumpIfNotSmi(ecx, &miss, Label::kNear); if (GetCondition() == equal) { // For equality we do not care about the sign of the result. - __ sub(eax, Operand(edx)); + __ sub(eax, edx); } else { Label done; - __ sub(edx, Operand(eax)); + __ sub(edx, eax); __ j(no_overflow, &done, Label::kNear); // Correct sign of result in case of overflow. __ not_(edx); @@ -6079,8 +6290,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { Label generic_stub; Label unordered; Label miss; - __ mov(ecx, Operand(edx)); - __ and_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ and_(ecx, eax); __ JumpIfSmi(ecx, &generic_stub, Label::kNear); __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); @@ -6108,9 +6319,9 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { // Performing mov, because xor would destroy the flag register. __ mov(eax, 0); // equal __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); + __ cmov(above, eax, ecx); __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); + __ cmov(below, eax, ecx); __ ret(0); __ bind(&unordered); @@ -6137,9 +6348,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { // Check that both operands are heap objects. Label miss; - __ mov(tmp1, Operand(left)); + __ mov(tmp1, left); STATIC_ASSERT(kSmiTag == 0); - __ and_(tmp1, Operand(right)); + __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss, Label::kNear); // Check that both operands are symbols. @@ -6148,13 +6359,13 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp1, Operand(tmp2)); + __ and_(tmp1, tmp2); __ test(tmp1, Immediate(kIsSymbolMask)); __ j(zero, &miss, Label::kNear); // Symbols are compared by identity. Label done; - __ cmp(left, Operand(right)); + __ cmp(left, right); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. ASSERT(right.is(eax)); @@ -6183,9 +6394,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { Register tmp3 = edi; // Check that both operands are heap objects. - __ mov(tmp1, Operand(left)); + __ mov(tmp1, left); STATIC_ASSERT(kSmiTag == 0); - __ and_(tmp1, Operand(right)); + __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss); // Check that both operands are strings. This leaves the instance @@ -6196,13 +6407,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); __ mov(tmp3, tmp1); STATIC_ASSERT(kNotStringTag != 0); - __ or_(tmp3, Operand(tmp2)); + __ or_(tmp3, tmp2); __ test(tmp3, Immediate(kIsNotStringMask)); __ j(not_zero, &miss); // Fast check for identical strings. 
Label not_same; - __ cmp(left, Operand(right)); + __ cmp(left, right); __ j(not_equal, &not_same, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -6216,7 +6427,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // because we already know they are not identical. Label do_compare; STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp1, Operand(tmp2)); + __ and_(tmp1, tmp2); __ test(tmp1, Immediate(kIsSymbolMask)); __ j(zero, &do_compare, Label::kNear); // Make sure eax is non-zero. At this point input operands are @@ -6249,8 +6460,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { ASSERT(state_ == CompareIC::OBJECTS); Label miss; - __ mov(ecx, Operand(edx)); - __ and_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ and_(ecx, eax); __ JumpIfSmi(ecx, &miss, Label::kNear); __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx); @@ -6259,7 +6470,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { __ j(not_equal, &miss, Label::kNear); ASSERT(GetCondition() == equal); - __ sub(eax, Operand(edx)); + __ sub(eax, edx); __ ret(0); __ bind(&miss); @@ -6274,15 +6485,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { __ push(eax); __ push(ecx); - // Call the runtime system in a fresh internal frame. - ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), - masm->isolate()); - __ EnterInternalFrame(); - __ push(edx); - __ push(eax); - __ push(Immediate(Smi::FromInt(op_))); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); + { + // Call the runtime system in a fresh internal frame. + ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), + masm->isolate()); + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edx); + __ push(eax); + __ push(Immediate(Smi::FromInt(op_))); + __ CallExternalReference(miss, 3); + } // Compute the entry point of the rewritten stub. __ lea(edi, FieldOperand(eax, Code::kHeaderSize)); @@ -6294,7 +6506,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { __ push(ecx); // Do a tail call to the rewritten stub. - __ jmp(Operand(edi)); + __ jmp(edi); } @@ -6323,8 +6535,8 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( // Capacity is smi 2^n. __ mov(index, FieldOperand(properties, kCapacityOffset)); __ dec(index); - __ and_(Operand(index), - Immediate(Smi::FromInt(name->Hash() + + __ and_(index, + Immediate(Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3); @@ -6416,13 +6628,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ push(r0); __ CallStub(&stub); - __ test(r1, Operand(r1)); + __ test(r1, r1); __ j(zero, miss); __ jmp(done); } void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Stack frame on entry: // esp[0 * kPointerSize]: return address. // esp[1 * kPointerSize]: key's hash. @@ -6453,8 +6667,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // Compute the masked index: (hash + i + i * i) & mask. __ mov(scratch, Operand(esp, 2 * kPointerSize)); if (i > 0) { - __ add(Operand(scratch), - Immediate(StringDictionary::GetProbeOffset(i))); + __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i))); } __ and_(scratch, Operand(esp, 0)); @@ -6510,6 +6723,275 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { ebx, eax, edi, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + { ebx, ecx, edx, EMIT_REMEMBERED_SET }, + { ebx, edi, edx, OMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal and CallFunctionStub. + { ebx, ecx, edx, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField and + // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. + { edx, ecx, ebx, EMIT_REMEMBERED_SET }, + // GenerateStoreField calls the stub with two different permutations of + // registers. This is the second. + { ebx, ecx, edx, EMIT_REMEMBERED_SET }, + // StoreIC::GenerateNormal via GenerateDictionaryStore + { ebx, edi, edx, EMIT_REMEMBERED_SET }, + // KeyedStoreIC::GenerateGeneric. + { ebx, edx, ecx, EMIT_REMEMBERED_SET}, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { edi, edx, ecx, EMIT_REMEMBERED_SET}, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); + + CpuFeatures::TryForceFeatureScope scope(SSE2); + if (CpuFeatures::IsSupported(SSE2)) { + StoreBufferOverflowStub stub2(kSaveFPRegs); + stub2.GetCode()->set_is_pregenerated(true); + } +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. 
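In other words, the stub is the out-of-line part of the write barrier: pointer stores that may need GC bookkeeping go through RecordWriteField/RecordWrite, which reach this stub when the fast inline checks do not filter them out. Conceptually the barrier has to guarantee something like the following (illustrative pseudo-C++ only; the helper names below are placeholders, not the real V8 entry points):
  // 'host' was written into, 'slot' is the written field, 'value' the new pointer.
  void WriteBarrierSketch(Heap* heap, HeapObject* host, Object** slot, Object* value) {
    if (!value->IsHeapObject()) return;  // Smi stores never need a barrier.
    // Remembered set: an old-to-new pointer must be visible to the next scavenge.
    if (heap->InNewSpace(value)) RememberSlot(heap, slot);                 // hypothetical helper
    // Incremental marking: a black host must not silently point at a white value.
    InformIncrementalMarking(heap, host, slot, HeapObject::cast(value));   // hypothetical helper
  }
The kDontSaveFPRegs/kSaveFPRegs argument threaded through the new call sites tells this stub whether its slow path has to preserve the XMM registers.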
+void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + __ jmp(&skip_to_incremental_noncompacting, Label::kNear); + __ jmp(&skip_to_incremental_compacting, Label::kFar); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + masm->set_byte_at(0, kTwoByteNopInstruction); + masm->set_byte_at(2, kFiveByteNopInstruction); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, + mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, + kReturnOnNoNeedToInformIncrementalMarker, + mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ ret(0); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. + } else { + ASSERT(mode == INCREMENTAL); + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value. 
+ } + __ mov(Operand(esp, 2 * kPointerSize), + Immediate(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label object_is_black, need_incremental, need_incremental_pop_object; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), + regs_.scratch0(), + regs_.scratch1(), + &object_is_black, + Label::kNear); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&object_is_black); + + // Get the value from the slot. + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + zero, + &ensure_not_white, + Label::kNear); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + not_zero, + &ensure_not_white, + Label::kNear); + + __ jmp(&need_incremental); + + __ bind(&ensure_not_white); + } + + // We need an extra register for this, so we push the object register + // temporarily. + __ push(regs_.object()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + &need_incremental_pop_object, + Label::kNear); + __ pop(regs_.object()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&need_incremental_pop_object); + __ pop(regs_.object()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index fa255da1fd..2a7d316f47 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -60,6 +60,25 @@ class TranscendentalCacheStub: public CodeStub { }; +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated() { return true; } + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -418,6 +437,8 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); + virtual bool SometimesSetsUpAFrame() { return false; } + private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -430,7 +451,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryNegativeLookup; } + Major MajorKey() { return StringDictionaryLookup; } int MinorKey() { return DictionaryBits::encode(dictionary_.code()) | @@ -451,6 +472,272 @@ class StringDictionaryLookupStub: public CodeStub { }; +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. + } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. + static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8. + + static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32. + static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32. 
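These four opcode constants describe the stub's patchable two-instruction header; GetMode and Patch below only ever read or rewrite bytes 0 and 2. Assuming Generate emits the short jump followed by the near jump and then nops them out, as shown earlier in code-stubs-ia32.cc, the three states look roughly like this (illustrative byte layout of the first 7 bytes, which is also the range Patch flushes from the instruction cache):
  //   STORE_BUFFER_ONLY:       3c XX 3d XX XX XX XX   cmp al,imm8 ; cmp eax,imm32  (both fall through)
  //   INCREMENTAL:             eb XX ..               jmp short <incremental path>; rest not reached
  //   INCREMENTAL_COMPACTION:  3c XX e9 XX XX XX XX   cmp al,imm8 ; jmp near <compaction path>
  // The XX bytes are the original jump displacements; the cmp opcodes simply
  // consume them as immediates, which is what makes them act as nops here.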
+ + static Mode GetMode(Code* stub) { + byte first_instruction = stub->instruction_start()[0]; + byte second_instruction = stub->instruction_start()[2]; + + if (first_instruction == kTwoByteJumpInstruction) { + return INCREMENTAL; + } + + ASSERT(first_instruction == kTwoByteNopInstruction); + + if (second_instruction == kFiveByteJumpInstruction) { + return INCREMENTAL_COMPACTION; + } + + ASSERT(second_instruction == kFiveByteNopInstruction); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteNopInstruction; + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteJumpInstruction; + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteJumpInstruction; + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start(), 7); + } + + private: + // This is a helper class for freeing up 3 scratch registers, where the third + // is always ecx (needed for shift operations). The input is two registers + // that must be preserved and one scratch register provided by the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_orig_(object), + address_orig_(address), + scratch0_orig_(scratch0), + object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_); + if (scratch0.is(ecx)) { + scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_); + } + if (object.is(ecx)) { + object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_); + } + if (address.is(ecx)) { + address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_); + } + ASSERT(!AreAliased(scratch0_, object_, address_, ecx)); + } + + void Save(MacroAssembler* masm) { + ASSERT(!address_orig_.is(object_)); + ASSERT(object_.is(object_orig_) || address_.is(address_orig_)); + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); + ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); + // We don't have to save scratch0_orig_ because it was given to us as + // a scratch register. But if we had to switch to a different reg then + // we should save the new scratch0_. + if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_); + if (!ecx.is(scratch0_orig_) && + !ecx.is(object_orig_) && + !ecx.is(address_orig_)) { + masm->push(ecx); + } + masm->push(scratch1_); + if (!address_.is(address_orig_)) { + masm->push(address_); + masm->mov(address_, address_orig_); + } + if (!object_.is(object_orig_)) { + masm->push(object_); + masm->mov(object_, object_orig_); + } + } + + void Restore(MacroAssembler* masm) { + // These will have been preserved the entire time, so we just need to move + // them back. Only in one case is the orig_ reg different from the plain + // one, since only one of them can alias with ecx. 
+ if (!object_.is(object_orig_)) { + masm->mov(object_orig_, object_); + masm->pop(object_); + } + if (!address_.is(address_orig_)) { + masm->mov(address_orig_, address_); + masm->pop(address_); + } + masm->pop(scratch1_); + if (!ecx.is(scratch0_orig_) && + !ecx.is(object_orig_) && + !ecx.is(address_orig_)) { + masm->pop(ecx); + } + if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The caller saved + // registers are eax, ecx and edx. The three scratch registers (incl. ecx) + // will be restored by other means so we don't bother pushing them here. + void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); + if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + masm->sub(esp, + Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + // Save all XMM registers except XMM0. + for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { + XMMRegister reg = XMMRegister::from_code(i); + masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg); + } + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + // Restore all XMM registers except XMM0. + for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { + XMMRegister reg = XMMRegister::from_code(i); + masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize)); + } + masm->add(esp, + Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + } + if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx); + if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_orig_; + Register address_orig_; + Register scratch0_orig_; + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + // Third scratch register is always ecx. 
+ + Register GetRegThatIsNotEcxOr(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(ecx)) continue; + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + } +; + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + bool MustBeInStubCache() { + // All stubs must be registered in the stub cache + // otherwise IncrementalMarker would not be able to find + // and patch it. + return true; + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 3> {}; + class ValueBits: public BitField<int, 3, 3> {}; + class AddressBits: public BitField<int, 6, 3> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {}; + class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + RegisterAllocation regs_; +}; + + } } // namespace v8::internal #endif // V8_IA32_CODE_STUBS_IA32_H_ diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 3a657bd541..f901b6f888 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -39,12 +39,16 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterInternalFrame(); + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveInternalFrame(); + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); } @@ -108,14 +112,14 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ mov(edx, dst); __ and_(edx, 0xF); __ neg(edx); - __ add(Operand(edx), Immediate(16)); - __ add(dst, Operand(edx)); - __ add(src, Operand(edx)); - __ sub(Operand(count), edx); + __ add(edx, Immediate(16)); + __ add(dst, edx); + __ add(src, edx); + __ sub(count, edx); // edi is now aligned. Check if esi is also aligned. Label unaligned_source; - __ test(Operand(src), Immediate(0x0F)); + __ test(src, Immediate(0x0F)); __ j(not_zero, &unaligned_source); { // Copy loop for aligned source and destination. 
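The destination-alignment arithmetic in CreateMemCopyFunction above is worth a worked example. With edx = 16 - (dst & 0xF), a destination of, say, 0x1007 yields edx = 9: dst and src advance by 9 bytes and count shrinks by 9, leaving dst 16-byte aligned for the movdqa loop that follows. An already-aligned dst gets edx = 16 rather than 0, so up to 16 leading bytes are skipped here; those bytes are handled by the unconditional copy earlier in the function, outside this hunk. A sketch of the computation:
  // Mirrors the and/neg/add sequence above; the result is always in [1, 16].
  static inline uintptr_t AlignmentAdjustment(uintptr_t dst) {
    return 16 - (dst & 0xF);
  }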
@@ -130,11 +134,11 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ prefetch(Operand(src, 0x20), 1); __ movdqa(xmm0, Operand(src, 0x00)); __ movdqa(xmm1, Operand(src, 0x10)); - __ add(Operand(src), Immediate(0x20)); + __ add(src, Immediate(0x20)); __ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x10), xmm1); - __ add(Operand(dst), Immediate(0x20)); + __ add(dst, Immediate(0x20)); __ dec(loop_count); __ j(not_zero, &loop); @@ -142,12 +146,12 @@ OS::MemCopyFunction CreateMemCopyFunction() { // At most 31 bytes to copy. Label move_less_16; - __ test(Operand(count), Immediate(0x10)); + __ test(count, Immediate(0x10)); __ j(zero, &move_less_16); __ movdqa(xmm0, Operand(src, 0)); - __ add(Operand(src), Immediate(0x10)); + __ add(src, Immediate(0x10)); __ movdqa(Operand(dst, 0), xmm0); - __ add(Operand(dst), Immediate(0x10)); + __ add(dst, Immediate(0x10)); __ bind(&move_less_16); // At most 15 bytes to copy. Copy 16 bytes at end of string. @@ -176,11 +180,11 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ prefetch(Operand(src, 0x20), 1); __ movdqu(xmm0, Operand(src, 0x00)); __ movdqu(xmm1, Operand(src, 0x10)); - __ add(Operand(src), Immediate(0x20)); + __ add(src, Immediate(0x20)); __ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x10), xmm1); - __ add(Operand(dst), Immediate(0x20)); + __ add(dst, Immediate(0x20)); __ dec(loop_count); __ j(not_zero, &loop); @@ -188,12 +192,12 @@ OS::MemCopyFunction CreateMemCopyFunction() { // At most 31 bytes to copy. Label move_less_16; - __ test(Operand(count), Immediate(0x10)); + __ test(count, Immediate(0x10)); __ j(zero, &move_less_16); __ movdqu(xmm0, Operand(src, 0)); - __ add(Operand(src), Immediate(0x10)); + __ add(src, Immediate(0x10)); __ movdqa(Operand(dst, 0), xmm0); - __ add(Operand(dst), Immediate(0x10)); + __ add(dst, Immediate(0x10)); __ bind(&move_less_16); // At most 15 bytes to copy. Copy 16 bytes at end of string. @@ -228,10 +232,10 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ mov(edx, dst); __ and_(edx, 0x03); __ neg(edx); - __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3) - __ add(dst, Operand(edx)); - __ add(src, Operand(edx)); - __ sub(Operand(count), edx); + __ add(edx, Immediate(4)); // edx = 4 - (dst & 3) + __ add(dst, edx); + __ add(src, edx); + __ sub(count, edx); // edi is now aligned, ecx holds number of remaning bytes to copy. __ mov(edx, count); diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index 2389948866..d7184ed208 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -100,63 +100,64 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList non_object_regs, bool convert_call_to_jmp) { // Enter an internal frame. - __ EnterInternalFrame(); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((object_regs & (1 << r)) != 0) { - __ push(reg); - } - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ test(reg, Immediate(0xc0000000)); - __ Assert(zero, "Unable to encode value as smi"); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((object_regs & (1 << r)) != 0) { + __ push(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ test(reg, Immediate(0xc0000000)); + __ Assert(zero, "Unable to encode value as smi"); + } + __ SmiTag(reg); + __ push(reg); } - __ SmiTag(reg); - __ push(reg); } - } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ Set(eax, Immediate(0)); // No arguments. - __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values containing object pointers from the expression - // stack. - for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if (FLAG_debug_code) { - __ Set(reg, Immediate(kDebugZapValue)); - } - if ((object_regs & (1 << r)) != 0) { - __ pop(reg); - } - if ((non_object_regs & (1 << r)) != 0) { - __ pop(reg); - __ SmiUntag(reg); + __ Set(eax, Immediate(0)); // No arguments. + __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values containing object pointers from the + // expression stack. + for (int i = kNumJSCallerSaved; --i >= 0;) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if (FLAG_debug_code) { + __ Set(reg, Immediate(kDebugZapValue)); + } + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + __ pop(reg); + __ SmiUntag(reg); + } } - } - // Get rid of the internal frame. - __ LeaveInternalFrame(); + // Get rid of the internal frame. + } // If this call did not replace a call but patched other code then there will // be an unwanted return address left on the stack. Here we get rid of that. if (convert_call_to_jmp) { - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); } // Now that the break point has been handled, resume normal execution by @@ -298,7 +299,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); // Re-run JSFunction, edi is function, esi is context. 
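The register-saving loop above depends on the ia32 smi representation: a raw register value whose top two bits are clear (the 0xc0000000 test) can be shifted left by one to become a valid smi, the GC then treats it as a non-pointer and leaves it untouched, and shifting right restores the original bits. A small standalone illustration of that encoding (illustrative only, not the macro assembler helpers):

#include <cassert>
#include <cstdint>

static uint32_t SmiTag(uint32_t raw) {
  assert((raw & 0xc0000000u) == 0);  // Same condition the Assert above checks.
  return raw << 1;                   // kSmiTag == 0: low bit clear marks a smi.
}

static uint32_t SmiUntag(uint32_t smi) {
  return smi >> 1;                   // Recovers the original register value.
}

static bool IsSmi(uint32_t word) {
  return (word & 1) == 0;            // Heap pointers carry a set low bit instead.
}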
- __ jmp(Operand(edx)); + __ jmp(edx); } const bool Debug::kFrameDropperSupported = true; diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index e23f3e9eff..02cc4ebd3b 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -116,7 +116,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { new_reloc->GetDataStartAddress() + padding, 0); intptr_t comment_string = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString); - RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string); + RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL); for (int i = 0; i < additional_comments; ++i) { #ifdef DEBUG byte* pos_before = reloc_info_writer.pos(); @@ -174,7 +174,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // We use RUNTIME_ENTRY for deoptimization bailouts. RelocInfo rinfo(curr_address + 1, // 1 after the call opcode. RelocInfo::RUNTIME_ENTRY, - reinterpret_cast<intptr_t>(deopt_entry)); + reinterpret_cast<intptr_t>(deopt_entry), + NULL); reloc_info_writer.Write(&rinfo); ASSERT_GE(reloc_info_writer.pos(), reloc_info->address() + ByteArray::kHeaderSize); @@ -205,6 +206,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; + // We might be in the middle of incremental marking with compaction. + // Tell collector to treat this code object in a special way and + // ignore all slots that might have been recorded on it. + isolate->heap()->mark_compact_collector()->InvalidateCode(code); + // Set the code for the function to non-optimized version. function->ReplaceCode(function->shared()->code()); @@ -221,7 +227,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code) { Address call_target_address = pc_after - kIntSize; @@ -250,6 +257,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x90; // nop Assembler::set_target_address_at(call_target_address, replacement_code->entry()); + + RelocInfo rinfo(call_target_address, + RelocInfo::CODE_TARGET, + 0, + unoptimized_code); + unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( + unoptimized_code, &rinfo, replacement_code); } @@ -268,6 +282,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x07; // offset Assembler::set_target_address_at(call_target_address, check_code->entry()); + + check_code->GetHeap()->incremental_marking()-> + RecordCodeTargetPatch(call_target_address, check_code); } @@ -415,7 +432,14 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0]->SetPc(reinterpret_cast<uint32_t>(from_)); } else { // Setup the frame pointer and the context pointer. - output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code())); + // All OSR stack frames are dynamically aligned to an 8-byte boundary. + int frame_pointer = input_->GetRegister(ebp.code()); + if ((frame_pointer & 0x4) == 0) { + // Return address at FP + 4 should be aligned, so FP mod 8 should be 4. 
+ frame_pointer -= kPointerSize; + has_alignment_padding_ = 1; + } + output_[0]->SetRegister(ebp.code(), frame_pointer); output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code())); unsigned pc_offset = data->OsrPcOffset()->value(); @@ -480,9 +504,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, // top address and the current frame's size. uint32_t top_address; if (is_bottommost) { - // 2 = context and function in the frame. - top_address = - input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes; + // If the optimized frame had alignment padding, adjust the frame pointer + // to point to the new position of the old frame pointer after padding + // is removed. Subtract 2 * kPointerSize for the context and function slots. + top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) - + height_in_bytes + has_alignment_padding_ * kPointerSize; } else { top_address = output_[frame_index - 1]->GetTop() - output_frame_size; } @@ -533,7 +559,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, } output_frame->SetFrameSlot(output_offset, value); intptr_t fp_value = top_address + output_offset; - ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value); + ASSERT(!is_bottommost || + input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize + == fp_value); output_frame->SetFp(fp_value); if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value); if (FLAG_trace_deopt) { @@ -638,7 +666,7 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumAllocatableRegisters; - __ sub(Operand(esp), Immediate(kDoubleRegsSize)); + __ sub(esp, Immediate(kDoubleRegsSize)); for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int offset = i * kDoubleSize; @@ -662,7 +690,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize)); } - __ sub(edx, Operand(ebp)); + __ sub(edx, ebp); __ neg(edx); // Allocate a new deoptimizer object. @@ -675,7 +703,10 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. __ mov(Operand(esp, 5 * kPointerSize), Immediate(ExternalReference::isolate_address())); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + } // Preserve deoptimizer object in register eax and get the input // frame descriptor pointer. @@ -698,15 +729,15 @@ void Deoptimizer::EntryGenerator::Generate() { // Remove the bailout id and the double registers from the stack. if (type() == EAGER) { - __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize)); + __ add(esp, Immediate(kDoubleRegsSize + kPointerSize)); } else { - __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize)); + __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize)); } // Compute a pointer to the unwinding limit in register ecx; that is // the first stack slot not part of the input frame. 
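The padding decision above follows from the 8-byte frame alignment: the return address sits at FP + 4, so an aligned frame needs FP congruent to 4 mod 8, and an incoming FP that is a multiple of 8 gets one 4-byte padding slot. A standalone restatement of that arithmetic (illustrative only):

#include <cassert>
#include <cstdint>

static const uint32_t kPointerSize = 4;  // ia32 word size.

static uint32_t AlignOsrFramePointer(uint32_t fp, int* has_alignment_padding) {
  assert((fp & 0x3) == 0);                 // Frame pointers are word aligned.
  *has_alignment_padding = 0;
  if ((fp & 0x4) == 0) {                   // FP % 8 == 0: return address at FP + 4
    fp -= kPointerSize;                    // would be misaligned, so pad one slot.
    *has_alignment_padding = 1;
  }
  assert(((fp + kPointerSize) & 0x7) == 0);  // FP + 4 is now 8-byte aligned.
  return fp;
}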
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); - __ add(ecx, Operand(esp)); + __ add(ecx, esp); // Unwind the stack down to - but not including - the unwinding // limit and copy the contents of the activation frame to the input @@ -715,18 +746,43 @@ void Deoptimizer::EntryGenerator::Generate() { Label pop_loop; __ bind(&pop_loop); __ pop(Operand(edx, 0)); - __ add(Operand(edx), Immediate(sizeof(uint32_t))); - __ cmp(ecx, Operand(esp)); + __ add(edx, Immediate(sizeof(uint32_t))); + __ cmp(ecx, esp); __ j(not_equal, &pop_loop); + // If frame was dynamically aligned, pop padding. + Label sentinel, sentinel_done; + __ pop(ecx); + __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset())); + __ j(equal, &sentinel); + __ push(ecx); + __ jmp(&sentinel_done); + __ bind(&sentinel); + __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()), + Immediate(1)); + __ bind(&sentinel_done); // Compute the output frame in the deoptimizer. __ push(eax); __ PrepareCallCFunction(1, ebx); __ mov(Operand(esp, 0 * kPointerSize), eax); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 1); + } __ pop(eax); + if (type() == OSR) { + // If alignment padding is added, push the sentinel. + Label no_osr_padding; + __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()), + Immediate(0)); + __ j(equal, &no_osr_padding, Label::kNear); + __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset())); + __ bind(&no_osr_padding); + } + + // Replace the current frame with the output frames. Label outer_push_loop, inner_push_loop; // Outer loop state: eax = current FrameDescription**, edx = one past the @@ -739,12 +795,12 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(ebx, Operand(eax, 0)); __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); __ bind(&inner_push_loop); - __ sub(Operand(ecx), Immediate(sizeof(uint32_t))); + __ sub(ecx, Immediate(sizeof(uint32_t))); __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, &inner_push_loop); - __ add(Operand(eax), Immediate(kPointerSize)); - __ cmp(eax, Operand(edx)); + __ add(eax, Immediate(kPointerSize)); + __ cmp(eax, edx); __ j(below, &outer_push_loop); // In case of OSR, we have to restore the XMM registers. diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc index a936277b2f..04edc5f427 100644 --- a/deps/v8/src/ia32/disasm-ia32.cc +++ b/deps/v8/src/ia32/disasm-ia32.cc @@ -55,6 +55,7 @@ struct ByteMnemonic { static const ByteMnemonic two_operands_instr[] = { + {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER}, {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER}, @@ -117,6 +118,19 @@ static const ByteMnemonic short_immediate_instr[] = { }; +// Generally we don't want to generate these because they are subject to partial +// register stalls. They are included for completeness and because the cmp +// variant is used by the RecordWrite stub. Because it does not update the +// register it is not subject to partial register stalls. 
+static ByteMnemonic byte_immediate_instr[] = { + {0x0c, "or", UNSET_OP_ORDER}, + {0x24, "and", UNSET_OP_ORDER}, + {0x34, "xor", UNSET_OP_ORDER}, + {0x3c, "cmp", UNSET_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + static const char* const jump_conditional_mnem[] = { /*0*/ "jo", "jno", "jc", "jnc", /*4*/ "jz", "jnz", "jna", "ja", @@ -149,7 +163,8 @@ enum InstructionType { REGISTER_INSTR, MOVE_REG_INSTR, CALL_JUMP_INSTR, - SHORT_IMMEDIATE_INSTR + SHORT_IMMEDIATE_INSTR, + BYTE_IMMEDIATE_INSTR }; @@ -198,6 +213,7 @@ void InstructionTable::Init() { CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR); CopyTable(call_jump_instr, CALL_JUMP_INSTR); CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR); + CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR); AddJumpConditionalShort(); SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc"); SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec"); @@ -912,6 +928,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, break; } + case BYTE_IMMEDIATE_INSTR: { + AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]); + data += 2; + break; + } + case NO_INSTR: processed = false; break; @@ -1346,11 +1368,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, data += 2; break; - case 0x2C: - AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1)); - data += 2; - break; - case 0xA9: AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1)); data += 5; diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 81c9ccb128..33d5cabad7 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // function calls. if (info->is_strict_mode() || info->is_native()) { Label ok; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &ok, Label::kNear); // +1 for return address. int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize; @@ -147,6 +147,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + __ push(ebp); // Caller's frame pointer. __ mov(ebp, esp); __ push(esi); // Callee's context. @@ -200,11 +205,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ mov(Operand(esi, context_offset), eax); - // Update the write barrier. This clobbers all involved - // registers, so we have use a third register to avoid - // clobbering esi. - __ mov(ecx, esi); - __ RecordWrite(ecx, context_offset, eax, ebx); + // Update the write barrier. This clobbers eax and ebx. + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx, + kDontSaveFPRegs); } } } @@ -260,7 +266,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. 
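The byte_immediate_instr table added to disasm-ia32.cc above covers the two-byte x86 short forms that operate on al with an 8-bit immediate (0x0C or, 0x24 and, 0x34 xor, 0x3C cmp); the new BYTE_IMMEDIATE_INSTR case prints the mnemonic plus the following byte and advances by two. A self-contained sketch of that decode step (not the V8 disassembler itself):

#include <cstdint>
#include <cstdio>

static const char* ByteImmediateMnemonic(uint8_t opcode) {
  switch (opcode) {
    case 0x0C: return "or";
    case 0x24: return "and";
    case 0x34: return "xor";
    case 0x3C: return "cmp";   // The form emitted by the RecordWrite stub.
    default:   return nullptr; // Not an "<op> al, imm8" short form.
  }
}

int main() {
  const uint8_t code[] = {0x3C, 0x2A};               // cmp al, 0x2a
  if (const char* mnem = ByteImmediateMnemonic(code[0])) {
    std::printf("%s al, 0x%x\n", mnem, static_cast<unsigned>(code[1]));
  }
  return 0;                                          // Two bytes consumed.
}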
if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -365,10 +371,10 @@ void FullCodeGenerator::EmitReturnSequence() { void FullCodeGenerator::verify_stack_height() { ASSERT(FLAG_verify_stack_height); - __ sub(Operand(ebp), Immediate(kPointerSize * stack_height())); - __ cmp(ebp, Operand(esp)); + __ sub(ebp, Immediate(kPointerSize * stack_height())); + __ cmp(ebp, esp); __ Assert(equal, "Full codegen stack height not as expected."); - __ add(Operand(ebp), Immediate(kPointerSize * stack_height())); + __ add(ebp, Immediate(kPointerSize * stack_height())); } @@ -597,7 +603,7 @@ void FullCodeGenerator::DoTest(Expression* condition, ToBooleanStub stub(result_register()); __ push(result_register()); __ CallStub(&stub, condition->test_id()); - __ test(result_register(), Operand(result_register())); + __ test(result_register(), result_register()); // The stub returns nonzero for true. Split(not_zero, if_true, if_false, fall_through); } @@ -661,11 +667,12 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ mov(location, src); + // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { int offset = Context::SlotOffset(var->index()); ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); - __ RecordWrite(scratch0, offset, src, scratch1); + __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); } } @@ -697,7 +704,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -715,7 +722,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ mov(StackOperand(variable), result_register()); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ mov(StackOperand(variable), Immediate(isolate()->factory()->the_hole_value())); @@ -738,11 +745,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ mov(ContextOperand(esi, variable->index()), result_register()); - int offset = Context::SlotOffset(variable->index()); - __ mov(ebx, esi); - __ RecordWrite(ebx, offset, result_register(), ecx); + // We know that we have written a function, which is not a smi. + __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + result_register(), + ecx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ mov(ContextOperand(esi, variable->index()), Immediate(isolate()->factory()->the_hole_value())); @@ -756,10 +768,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ push(esi); __ push(Immediate(variable->name())); // Declaration nodes are always introduced in one of three modes. 
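The stores rewritten above (RecordWriteContextSlot, RecordWriteField, and later RecordWriteArray) all follow the same pattern: the pointer is written first, then the collector is told about the slot so incremental marking and the remembered set stay consistent, with OMIT_SMI_CHECK used where the caller already knows the value cannot be a smi. A purely conceptual sketch of that shape, not V8's actual barrier:

#include <cstdint>
#include <unordered_set>

struct HeapObject;  // Opaque in this sketch.

struct WriteBarrier {
  std::unordered_set<HeapObject**> remembered_slots;

  static bool IsSmi(HeapObject* value) {
    return (reinterpret_cast<uintptr_t>(value) & 1) == 0;  // ia32 tagging.
  }

  // Called after "*slot = value" has already happened, as in the code above.
  void AfterStore(HeapObject** slot, HeapObject* value, bool omit_smi_check) {
    if (!omit_smi_check && IsSmi(value)) return;  // Smis are not pointers.
    remembered_slots.insert(slot);  // Revisited later by marking/scavenging.
  }
};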
- ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); - PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; + ASSERT(mode == VAR || mode == CONST || mode == LET); + PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE; __ push(Immediate(Smi::FromInt(attr))); // Push initial value, if any. // Note: For variables we must not push an initial value (such as @@ -768,7 +778,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, increment_stack_height(3); if (function != NULL) { VisitForStackValue(function); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { __ push(Immediate(isolate()->factory()->the_hole_value())); increment_stack_height(); } else { @@ -835,10 +845,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { if (inline_smi_code) { Label slow_case; __ mov(ecx, edx); - __ or_(ecx, Operand(eax)); + __ or_(ecx, eax); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. __ jmp(clause->body_target()); @@ -850,7 +860,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. __ jmp(clause->body_target()); @@ -939,7 +949,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // For all objects but the receiver, check that the cache is empty. Label check_prototype; - __ cmp(ecx, Operand(eax)); + __ cmp(ecx, eax); __ j(equal, &check_prototype, Label::kNear); __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); __ cmp(edx, isolate()->factory()->empty_fixed_array()); @@ -1021,9 +1031,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ push(ecx); // Enumerable. __ push(ebx); // Current entry. __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(equal, loop_statement.continue_label()); - __ mov(ebx, Operand(eax)); + __ mov(ebx, eax); // Update the 'each' property or variable from the possibly filtered // entry in register ebx. @@ -1047,7 +1057,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // Remove the pointers stored on the stack. __ bind(loop_statement.break_label()); - __ add(Operand(esp), Immediate(5 * kPointerSize)); + __ add(esp, Immediate(5 * kPointerSize)); decrement_stack_height(ForIn::kElementCount); // Exit and decrement the loop depth. @@ -1189,16 +1199,22 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. 
- if (var->mode() == Variable::DYNAMIC_GLOBAL) { + if (var->mode() == DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ jmp(done); - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ mov(eax, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST) { + if (local->mode() == CONST || + local->mode() == LET) { __ cmp(eax, isolate()->factory()->the_hole_value()); __ j(not_equal, done); - __ mov(eax, isolate()->factory()->undefined_value()); + if (local->mode() == CONST) { + __ mov(eax, isolate()->factory()->undefined_value()); + } else { // LET + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } } __ jmp(done); } @@ -1231,7 +1247,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { Comment cmnt(masm_, var->IsContextSlot() ? "Context variable" : "Stack variable"); - if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { + if (var->mode() != LET && var->mode() != CONST) { context()->Plug(var); } else { // Let and const need a read barrier. @@ -1239,10 +1255,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { GetVar(eax, var); __ cmp(eax, isolate()->factory()->the_hole_value()); __ j(not_equal, &done, Label::kNear); - if (var->mode() == Variable::LET) { + if (var->mode() == LET) { __ push(Immediate(var->name())); __ CallRuntime(Runtime::kThrowReferenceError, 1); - } else { // Variable::CONST + } else { // CONST __ mov(eax, isolate()->factory()->undefined_value()); } __ bind(&done); @@ -1480,8 +1496,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ mov(FieldOperand(ebx, offset), result_register()); + Label no_map_change; + __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store. 
- __ RecordWrite(ebx, offset, result_register(), ecx); + __ RecordWriteField(ebx, offset, result_register(), ecx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset)); + __ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear); + __ push(Operand(esp, 0)); + __ CallRuntime(Runtime::kNonSmiElementStored, 1); + __ bind(&no_map_change); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1641,7 +1667,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ pop(edx); decrement_stack_height(); __ mov(ecx, eax); - __ or_(eax, Operand(edx)); + __ or_(eax, edx); JumpPatchSite patch_site(masm_); patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear); @@ -1691,32 +1717,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, break; } case Token::ADD: - __ add(eax, Operand(ecx)); + __ add(eax, ecx); __ j(overflow, &stub_call); break; case Token::SUB: - __ sub(eax, Operand(ecx)); + __ sub(eax, ecx); __ j(overflow, &stub_call); break; case Token::MUL: { __ SmiUntag(eax); - __ imul(eax, Operand(ecx)); + __ imul(eax, ecx); __ j(overflow, &stub_call); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &done, Label::kNear); __ mov(ebx, edx); - __ or_(ebx, Operand(ecx)); + __ or_(ebx, ecx); __ j(negative, &stub_call); break; } case Token::BIT_OR: - __ or_(eax, Operand(ecx)); + __ or_(eax, ecx); break; case Token::BIT_AND: - __ and_(eax, Operand(ecx)); + __ and_(eax, ecx); break; case Token::BIT_XOR: - __ xor_(eax, Operand(ecx)); + __ xor_(eax, ecx); break; default: UNREACHABLE(); @@ -1838,7 +1864,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { + } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(eax); // Value. @@ -1859,11 +1885,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ mov(location, eax); if (var->IsContextSlot()) { __ mov(edx, eax); - __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs); } } - } else if (var->mode() != Variable::CONST) { + } else if (var->mode() != CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, ecx); @@ -1877,7 +1904,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ mov(location, eax); if (var->IsContextSlot()) { __ mov(edx, eax); - __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs); } } else { ASSERT(var->IsLookupSlot()); @@ -2069,8 +2097,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { } // Record source position for debugger. SetSourcePosition(expr->position()); + + // Record call targets in unoptimized code, but not in the snapshot. + bool record_call_target = !Serializer::enabled(); + if (record_call_target) { + flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET); + } CallFunctionStub stub(arg_count, flags); __ CallStub(&stub); + if (record_call_target) { + // There is a one element cache in the instruction stream. 
+#ifdef DEBUG + int return_site_offset = masm()->pc_offset(); +#endif + Handle<Object> uninitialized = + CallFunctionStub::UninitializedSentinel(isolate()); + Handle<JSGlobalPropertyCell> cell = + isolate()->factory()->NewJSGlobalPropertyCell(uninitialized); + __ test(eax, Immediate(cell)); + // Patching code in the stub assumes the opcode is 1 byte and there is + // word for a pointer in the operand. + ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize); + } + RecordJSReturnSite(expr); // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -2094,10 +2143,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = strict_mode_flag(); - if (FLAG_harmony_block_scoping) { - strict_mode = kStrictMode; - } + StrictModeFlag strict_mode = + FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); __ push(Immediate(Smi::FromInt(strict_mode))); __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP @@ -2140,7 +2187,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2438,9 +2485,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( STATIC_ASSERT(kPointerSize == 4); __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); // Calculate location of the first key name. - __ add(Operand(ebx), - Immediate(FixedArray::kHeaderSize + - DescriptorArray::kFirstIndex * kPointerSize)); + __ add(ebx, + Immediate(FixedArray::kHeaderSize + + DescriptorArray::kFirstIndex * kPointerSize)); // Loop through all the keys in the descriptor array. If one of these is the // symbol valueOf the result is false. Label entry, loop; @@ -2449,9 +2496,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ mov(edx, FieldOperand(ebx, 0)); __ cmp(edx, FACTORY->value_of_symbol()); __ j(equal, if_false); - __ add(Operand(ebx), Immediate(kPointerSize)); + __ add(ebx, Immediate(kPointerSize)); __ bind(&entry); - __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(not_equal, &loop); // Reload map as register ebx was used as temporary above. @@ -2591,7 +2638,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) { __ pop(ebx); decrement_stack_height(); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(equal, if_true, if_false, fall_through); @@ -2647,20 +2694,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax); // Map is now in eax. __ j(below, &null); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. 
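The one-element call target cache noted above relies on the layout of test eax, imm32: per the disassembler table earlier in this patch, opcode 0xA9 is followed by a 32-bit immediate, so the cell pointer embedded after the call sits at a fixed, patchable offset from the return address, which is what the ASSERT on the pc offset guards. A small sketch of patching such an embedded 32-bit operand (the exact layout is an assumption here; names are illustrative):

#include <cstdint>
#include <cstring>

// return_site points just past the call; the marker instruction follows it:
// one opcode byte, then a pointer-sized (32-bit on ia32) immediate.
static void PatchEmbeddedPointer(uint8_t* return_site, uint32_t new_pointer) {
  std::memcpy(return_site + 1, &new_pointer, sizeof(new_pointer));
}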
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE); - __ j(above_equal, &function); - - // Check if the constructor in the map is a function. + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ j(equal, &function); + + __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ j(equal, &function); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. __ mov(eax, FieldOperand(eax, Map::kConstructorOffset)); __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); __ j(not_equal, &non_function_constructor); @@ -2741,8 +2792,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope fscope(SSE2); __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. - __ movd(xmm1, Operand(ebx)); - __ movd(xmm0, Operand(eax)); + __ movd(xmm1, ebx); + __ movd(xmm0, eax); __ cvtss2sd(xmm1, xmm1); __ xorps(xmm0, xmm1); __ subsd(xmm0, xmm1); @@ -2843,10 +2894,11 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { // Store the value. __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax); + // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. __ mov(edx, eax); - __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx); + __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs); __ bind(&done); context()->Plug(eax); @@ -3119,14 +3171,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ mov(index_1, Operand(esp, 1 * kPointerSize)); __ mov(index_2, Operand(esp, 0)); __ mov(temp, index_1); - __ or_(temp, Operand(index_2)); + __ or_(temp, index_2); __ JumpIfNotSmi(temp, &slow_case); // Check that both indices are valid. __ mov(temp, FieldOperand(object, JSArray::kLengthOffset)); - __ cmp(temp, Operand(index_1)); + __ cmp(temp, index_1); __ j(below_equal, &slow_case); - __ cmp(temp, Operand(index_2)); + __ cmp(temp, index_2); __ j(below_equal, &slow_case); // Bring addresses into index1 and index2. @@ -3139,16 +3191,35 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ mov(Operand(index_2, 0), object); __ mov(Operand(index_1, 0), temp); - Label new_space; - __ InNewSpace(elements, temp, equal, &new_space); + Label no_remembered_set; + __ CheckPageFlag(elements, + temp, + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &no_remembered_set, + Label::kNear); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + // We are swapping two objects in an array and the incremental marker never + // pauses in the middle of scanning a single object. Therefore the + // incremental marker is not disturbed, so we don't need to call the + // RecordWrite stub that notifies the incremental marker. 
+ __ RememberedSetHelper(elements, + index_1, + temp, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + __ RememberedSetHelper(elements, + index_2, + temp, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + + __ bind(&no_remembered_set); - __ mov(object, elements); - __ RecordWriteHelper(object, index_1, temp); - __ RecordWriteHelper(elements, index_2, temp); - - __ bind(&new_space); // We are done. Drop elements from the stack, and return undefined. - __ add(Operand(esp), Immediate(3 * kPointerSize)); + __ add(esp, Immediate(3 * kPointerSize)); __ mov(eax, isolate()->factory()->undefined_value()); __ jmp(&done); @@ -3221,11 +3292,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) { __ pop(left); Label done, fail, ok; - __ cmp(left, Operand(right)); + __ cmp(left, right); __ j(equal, &ok); // Fail if either is a non-HeapObject. __ mov(tmp, left); - __ and_(Operand(tmp), right); + __ and_(tmp, right); __ JumpIfSmi(tmp, &fail); __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset)); __ CmpInstanceType(tmp, JS_REGEXP_TYPE); @@ -3316,7 +3387,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { Operand separator_operand = Operand(esp, 2 * kPointerSize); Operand result_operand = Operand(esp, 1 * kPointerSize); Operand array_length_operand = Operand(esp, 0); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ cld(); // Check that the array is a JSArray __ JumpIfSmi(array, &bailout); @@ -3352,7 +3423,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { // Live loop registers: index, array_length, string, // scratch, string_length, elements. if (FLAG_debug_code) { - __ cmp(index, Operand(array_length)); + __ cmp(index, array_length); __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin"); } __ bind(&loop); @@ -3370,8 +3441,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ add(string_length, FieldOperand(string, SeqAsciiString::kLengthOffset)); __ j(overflow, &bailout); - __ add(Operand(index), Immediate(1)); - __ cmp(index, Operand(array_length)); + __ add(index, Immediate(1)); + __ cmp(index, array_length); __ j(less, &loop); // If array_length is 1, return elements[0], a string. @@ -3405,10 +3476,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { // to string_length. __ mov(scratch, separator_operand); __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset)); - __ sub(string_length, Operand(scratch)); // May be negative, temporarily. + __ sub(string_length, scratch); // May be negative, temporarily. __ imul(scratch, array_length_operand); __ j(overflow, &bailout); - __ add(string_length, Operand(scratch)); + __ add(string_length, scratch); __ j(overflow, &bailout); __ shr(string_length, 1); @@ -3449,7 +3520,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ bind(&loop_1_condition); __ cmp(index, array_length_operand); __ j(less, &loop_1); // End while (index < length). 
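The string-length arithmetic in EmitFastAsciiArrayJoin above computes the joined length as (sum of element lengths minus the separator length) plus separator length times array length, which is the usual sum + separator * (n - 1), with an overflow check after each step. A plain C++ restatement of the same formula (illustrative only):

#include <cstddef>
#include <string>
#include <vector>

static size_t JoinedLength(const std::vector<std::string>& parts,
                           const std::string& separator) {
  if (parts.empty()) return 0;
  size_t sum = 0;
  for (const std::string& part : parts) sum += part.size();
  // n - 1 separators go between n elements.
  return sum + separator.size() * (parts.size() - 1);
}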
@@ -3490,7 +3561,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ cmp(index, array_length_operand); __ j(less, &loop_2); // End while (index < length). @@ -3531,7 +3602,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ cmp(index, array_length_operand); __ j(less, &loop_3); // End while (index < length). @@ -3543,7 +3614,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ bind(&done); __ mov(eax, result_operand); // Drop temp values from the stack, and restore context register. - __ add(Operand(esp), Immediate(3 * kPointerSize)); + __ add(esp, Immediate(3 * kPointerSize)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); decrement_stack_height(); @@ -3823,9 +3894,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (ShouldInlineSmiCase(expr->op())) { if (expr->op() == Token::INC) { - __ add(Operand(eax), Immediate(Smi::FromInt(1))); + __ add(eax, Immediate(Smi::FromInt(1))); } else { - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); } __ j(overflow, &stub_call, Label::kNear); // We could eliminate this smi check if we split the code at @@ -3835,9 +3906,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ bind(&stub_call); // Call stub. Undo operation first. 
if (expr->op() == Token::INC) { - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); } else { - __ add(Operand(eax), Immediate(Smi::FromInt(1))); + __ add(eax, Immediate(Smi::FromInt(1))); } } @@ -3956,10 +4027,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through) { + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3998,8 +4073,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, Split(not_zero, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(eax, if_false); - __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx); - Split(above_equal, if_true, if_false, fall_through); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); + __ j(equal, if_true); + __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE); + Split(equal, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(eax, if_false); if (!FLAG_harmony_typeof) { @@ -4017,18 +4095,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ cmp(eax, isolate()->factory()->undefined_value()); - Split(equal, if_true, if_false, fall_through); + context()->Plug(if_true, if_false); } @@ -4036,9 +4103,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. - Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4046,16 +4116,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { - context()->Plug(if_true, if_false); - return; - } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); - switch (expr->op()) { + switch (op) { case Token::IN: VisitForStackValue(expr->right()); __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); @@ -4071,7 +4134,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { __ CallStub(&stub); decrement_stack_height(2); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ test(eax, Operand(eax)); + __ test(eax, eax); // The stub returns 0 for true. 
Split(zero, if_true, if_false, fall_through); break; @@ -4080,11 +4143,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); Condition cc = no_condition; - bool strict = false; switch (op) { case Token::EQ_STRICT: - strict = true; - // Fall through case Token::EQ: cc = equal; __ pop(edx); @@ -4120,10 +4180,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { JumpPatchSite patch_site(masm_); if (inline_smi_code) { Label slow_case; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); Split(cc, if_true, if_false, NULL); __ bind(&slow_case); } @@ -4135,7 +4195,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ test(eax, Operand(eax)); + __ test(eax, eax); Split(cc, if_true, if_false, fall_through); } } @@ -4146,7 +4206,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4154,15 +4216,20 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(expr->expression()); + VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ cmp(eax, isolate()->factory()->null_value()); - if (expr->is_strict()) { + Handle<Object> nil_value = nil == kNullValue ? + isolate()->factory()->null_value() : + isolate()->factory()->undefined_value(); + __ cmp(eax, nil_value); + if (expr->op() == Token::EQ_STRICT) { Split(equal, if_true, if_false, fall_through); } else { + Handle<Object> other_nil_value = nil == kNullValue ? + isolate()->factory()->undefined_value() : + isolate()->factory()->null_value(); __ j(equal, if_true); - __ cmp(eax, isolate()->factory()->undefined_value()); + __ cmp(eax, other_nil_value); __ j(equal, if_true); __ JumpIfSmi(eax, if_false); // It can be an undetectable object. @@ -4229,7 +4296,7 @@ void FullCodeGenerator::EnterFinallyBlock() { // Cook return address on top of stack (smi encoded Code* delta) ASSERT(!result_register().is(edx)); __ pop(edx); - __ sub(Operand(edx), Immediate(masm_->CodeObject())); + __ sub(edx, Immediate(masm_->CodeObject())); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); STATIC_ASSERT(kSmiTag == 0); __ SmiTag(edx); @@ -4245,8 +4312,8 @@ void FullCodeGenerator::ExitFinallyBlock() { // Uncook return address. __ pop(edx); __ SmiUntag(edx); - __ add(Operand(edx), Immediate(masm_->CodeObject())); - __ jmp(Operand(edx)); + __ add(edx, Immediate(masm_->CodeObject())); + __ jmp(edx); } diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 9b5cc56401..8a98b179d3 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update write barrier. Make sure not to clobber the value. 
__ mov(r1, value); - __ RecordWrite(elements, r0, r1); + __ RecordWrite(elements, r0, r1, kDontSaveFPRegs); } @@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // Fast case: Do the load. STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize)); - __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value())); + __ cmp(scratch, Immediate(FACTORY->the_hole_value())); // In case the loaded value is the_hole we have to consult GetProperty // to ensure the prototype chain is searched. __ j(equal, out_of_range); @@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // Check if element is in the range of mapped arguments. If not, jump // to the unmapped lookup with the parameter map in scratch1. __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset)); - __ sub(Operand(scratch2), Immediate(Smi::FromInt(2))); - __ cmp(key, Operand(scratch2)); + __ sub(scratch2, Immediate(Smi::FromInt(2))); + __ cmp(key, scratch2); __ j(greater_equal, unmapped_case); // Load element index and check whether it is the hole. @@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK); __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset)); - __ cmp(key, Operand(scratch)); + __ cmp(key, scratch); __ j(greater_equal, slow_case); return FieldOperand(backing_store, key, @@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shr(ecx, KeyedLookupCache::kMapHashShift); __ mov(edi, FieldOperand(eax, String::kHashFieldOffset)); __ shr(edi, String::kHashShift); - __ xor_(ecx, Operand(edi)); + __ xor_(ecx, edi); __ and_(ecx, KeyedLookupCache::kCapacityMask); // Load the key (consisting of map and symbol) from the cache and @@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shl(edi, kPointerSizeLog2 + 1); __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); - __ add(Operand(edi), Immediate(kPointerSize)); + __ add(edi, Immediate(kPointerSize)); __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); @@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ mov(edi, Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); - __ sub(edi, Operand(ecx)); + __ sub(edi, ecx); __ j(above_equal, &property_array_property); // Load in-object property. __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset)); - __ add(ecx, Operand(edi)); + __ add(ecx, edi); __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); @@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // Check that it has indexed interceptor and access checks // are not enabled for this object. __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset)); - __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask)); - __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor)); + __ and_(ecx, Immediate(kSlowCaseBitFieldMask)); + __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor)); __ j(not_zero, &slow); // Everything is fine, call runtime. 
@@ -710,7 +710,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ mov(mapped_location, eax); __ lea(ecx, mapped_location); __ mov(edx, eax); - __ RecordWrite(ebx, ecx, edx); + __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in ebx. @@ -719,7 +719,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ mov(unmapped_location, eax); __ lea(edi, unmapped_location); __ mov(edx, eax); - __ RecordWrite(ebx, edi, edx); + __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); @@ -734,7 +734,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label slow, fast, array, extra; + Label slow, fast_object_with_map_check, fast_object_without_map_check; + Label fast_double_with_map_check, fast_double_without_map_check; + Label check_if_double_array, array, extra; // Check that the object isn't a smi. __ JumpIfSmi(edx, &slow); @@ -750,22 +752,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ CmpInstanceType(edi, JS_ARRAY_TYPE); __ j(equal, &array); // Check that the object is some kind of JSObject. - __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE); + __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE); __ j(below, &slow); - __ CmpInstanceType(edi, JS_PROXY_TYPE); - __ j(equal, &slow); - __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE); - __ j(equal, &slow); // Object case: Check key against length in the elements array. // eax: value // edx: JSObject // ecx: key (a smi) - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - // Check that the object is in fast mode and writable. - __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK); - __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); - __ j(below, &fast); + // edi: receiver map + __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + // Check array bounds. Both the key and the length of FixedArray are smis. + __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ j(below, &fast_object_with_map_check); // Slow case: call runtime. __ bind(&slow); @@ -778,16 +776,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // eax: value // edx: receiver, a JSArray // ecx: key, a smi. - // edi: receiver->elements, a FixedArray + // ebx: receiver->elements, a FixedArray + // edi: receiver map // flags: compare (ecx, edx.length()) // do not leave holes in the array: __ j(not_equal, &slow); - __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); + __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); __ j(above_equal, &slow); - // Add 1 to receiver->length, and go to fast array write. + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); + __ j(not_equal, &check_if_double_array); + // Add 1 to receiver->length, and go to common element store code for Objects. + __ add(FieldOperand(edx, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + __ jmp(&fast_object_without_map_check); + + __ bind(&check_if_double_array); + __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map()); + __ j(not_equal, &slow); + // Add 1 to receiver->length, and go to common element store code for doubles. 
__ add(FieldOperand(edx, JSArray::kLengthOffset), Immediate(Smi::FromInt(1))); - __ jmp(&fast); + __ jmp(&fast_double_without_map_check); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it @@ -796,24 +806,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // eax: value // edx: receiver, a JSArray // ecx: key, a smi. - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK); + // edi: receiver map + __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); - // Check the key against the length in the array, compute the - // address to store into and fall through to fast case. + // Check the key against the length in the array and fall through to the + // common store code. __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis. __ j(above_equal, &extra); - // Fast case: Do the store. - __ bind(&fast); + // Fast case: Do the store, could either Object or double. + __ bind(&fast_object_with_map_check); // eax: value // ecx: key (a smi) // edx: receiver - // edi: FixedArray receiver->elements - __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax); + // ebx: FixedArray receiver->elements + // edi: receiver map + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); + __ j(not_equal, &fast_double_with_map_check); + __ bind(&fast_object_without_map_check); + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(eax, &non_smi_value); + // It's irrelevant whether array is smi-only or not when writing a smi. + __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax); + __ ret(0); + + __ bind(&non_smi_value); + // Escape to slow case when writing non-smi into smi-only array. + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(edi, &slow, Label::kNear); + + // Fast elements array, store the value to the elements backing store. + __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax); // Update write barrier for the elements array address. - __ mov(edx, Operand(eax)); - __ RecordWrite(edi, 0, edx, ecx); + __ mov(edx, eax); // Preserve the value which is returned. + __ RecordWriteArray( + ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ ret(0); + + __ bind(&fast_double_with_map_check); + // Check for fast double array case. If this fails, call through to the + // runtime. + __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map()); + __ j(not_equal, &slow); + __ bind(&fast_double_without_map_check); + // If the value is a number, store it as a double in the FastDoubleElements + // array. + __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false); __ ret(0); } @@ -951,22 +991,22 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack; 1 ~ return address. __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push the receiver and the name of the function. - __ push(edx); - __ push(ecx); + // Push the receiver and the name of the function. + __ push(edx); + __ push(ecx); - // Call the entry. - CEntryStub stub(1); - __ mov(eax, Immediate(2)); - __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate()))); - __ CallStub(&stub); + // Call the entry. 
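The rewritten keyed-store fast path above handles the cases separately: a smi value can be stored into any fast backing store without a write barrier, a non-smi stored into a smi-only array bails out to the runtime so the elements kind can be transitioned, a fast object array stores the value and then runs the write barrier, and a fast double array stores the number unboxed. A conceptual restatement in plain C++, not V8's data structures:

enum class ElementsKind { kFastSmiOnly, kFastObject, kFastDouble };

struct TaggedValue {
  bool is_smi;
  bool is_number;   // Smi or heap number.
  double number;    // Valid when is_number is true.
  void* object;     // The tagged word itself.
};

enum class StoreResult { kStored, kStoredNeedsWriteBarrier, kNeedsSlowPath };

static StoreResult StoreFastElement(ElementsKind kind, const TaggedValue& v,
                                    void** object_slot, double* double_slot) {
  if (kind == ElementsKind::kFastDouble) {
    if (!v.is_number) return StoreResult::kNeedsSlowPath;
    *double_slot = v.number;                    // Stored unboxed, no barrier.
    return StoreResult::kStored;
  }
  if (v.is_smi) {
    *object_slot = v.object;                    // Smis never need a barrier.
    return StoreResult::kStored;
  }
  if (kind == ElementsKind::kFastSmiOnly) {
    return StoreResult::kNeedsSlowPath;         // Runtime transitions the kind.
  }
  *object_slot = v.object;                      // Fast object elements.
  return StoreResult::kStoredNeedsWriteBarrier; // RecordWriteArray follows.
}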
+ CEntryStub stub(1); + __ mov(eax, Immediate(2)); + __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate()))); + __ CallStub(&stub); - // Move result to edi and exit the internal frame. - __ mov(edi, eax); - __ LeaveInternalFrame(); + // Move result to edi and exit the internal frame. + __ mov(edi, eax); + } // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -1111,13 +1151,17 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1); - __ EnterInternalFrame(); - __ push(ecx); // save the key - __ push(edx); // pass the receiver - __ push(ecx); // pass the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(ecx); // restore the key - __ LeaveInternalFrame(); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(ecx); // save the key + __ push(edx); // pass the receiver + __ push(ecx); // pass the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(ecx); // restore the key + // Leave the internal frame. + } + __ mov(edi, eax); __ jmp(&do_call); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 4e3ea98161..9e1fd34af3 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -70,6 +70,17 @@ bool LCodeGen::GenerateCode() { ASSERT(is_unused()); status_ = GENERATING; CpuFeatures::Scope scope(SSE2); + + CodeStub::GenerateFPStubs(); + + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + + dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 || + info()->osr_ast_id() != AstNode::kNoNumber; + return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -144,6 +155,29 @@ bool LCodeGen::GeneratePrologue() { __ bind(&ok); } + if (dynamic_frame_alignment_) { + Label do_not_pad, align_loop; + STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); + // Align esp to a multiple of 2 * kPointerSize. + __ test(esp, Immediate(kPointerSize)); + __ j(zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + // Copy arguments, receiver, and return address. + __ mov(ecx, Immediate(scope()->num_parameters() + 2)); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), + Immediate(isolate()->factory()->frame_alignment_marker())); + + __ bind(&do_not_pad); + } + __ push(ebp); // Caller's frame pointer. __ mov(ebp, esp); __ push(esi); // Callee's context. @@ -204,11 +238,12 @@ bool LCodeGen::GeneratePrologue() { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ mov(Operand(esi, context_offset), eax); - // Update the write barrier. This clobbers all involved - // registers, so we have to use a third register to avoid - // clobbering esi. - __ mov(ecx, esi); - __ RecordWrite(ecx, context_offset, eax, ebx); + // Update the write barrier. This clobbers eax and ebx. 
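Several hunks above replace paired EnterInternalFrame/LeaveInternalFrame calls with a block-scoped FrameScope, so the frame teardown can no longer be forgotten on any exit path. A minimal stand-alone sketch of that RAII pattern, assuming a hypothetical EmitterSketch rather than the real MacroAssembler:

// Hypothetical emitter standing in for MacroAssembler.
struct EmitterSketch {
  void EnterInternalFrame() { /* emit frame setup */ }
  void LeaveInternalFrame() { /* emit frame teardown */ }
};

class FrameScopeSketch {
 public:
  explicit FrameScopeSketch(EmitterSketch* e) : emitter_(e) {
    emitter_->EnterInternalFrame();
  }
  ~FrameScopeSketch() { emitter_->LeaveInternalFrame(); }
  FrameScopeSketch(const FrameScopeSketch&) = delete;
  FrameScopeSketch& operator=(const FrameScopeSketch&) = delete;

 private:
  EmitterSketch* emitter_;
};

void GenerateSomethingWithAFrame(EmitterSketch* masm) {
  {
    FrameScopeSketch scope(masm);
    // ... push arguments, call the runtime, move the result ...
  }  // The frame is left here, at the closing brace, on every path.
}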
+ __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx, + kDontSaveFPRegs); } } Comment(";;; End allocate local context"); @@ -260,6 +295,9 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + Comment(";;; Deferred code @%d: %s.", + code->instruction_index(), + code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -481,14 +519,18 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, LInstruction* instr, LOperand* context) { - ASSERT(context->IsRegister() || context->IsStackSlot()); if (context->IsRegister()) { if (!ToRegister(context).is(esi)) { __ mov(esi, ToRegister(context)); } - } else { - // Context is stack slot. + } else if (context->IsStackSlot()) { __ mov(esi, ToOperand(context)); + } else if (context->IsConstantOperand()) { + Handle<Object> literal = + chunk_->LookupLiteral(LConstantOperand::cast(context)); + LoadHeapObject(esi, Handle<Context>::cast(literal)); + } else { + UNREACHABLE(); } __ CallRuntimeSaveDoubles(id); @@ -669,7 +711,7 @@ void LCodeGen::RecordSafepoint( int arguments, int deoptimization_index) { ASSERT(kind == expected_safepoint_kind_); - const ZoneList<LOperand*>* operands = pointers->operands(); + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); for (int i = 0; i < operands->length(); i++) { @@ -1200,8 +1242,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { - ASSERT(instr->result()->IsRegister()); - __ Set(ToRegister(instr->result()), Immediate(instr->value())); + Register reg = ToRegister(instr->result()); + Handle<Object> handle = instr->value(); + if (handle->IsHeapObject()) { + LoadHeapObject(reg, Handle<HeapObject>::cast(handle)); + } else { + __ Set(reg, Immediate(handle)); + } } @@ -1577,23 +1624,33 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { +void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { Register reg = ToRegister(instr->InputAt(0)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); - // TODO(fsc): If the expression is known to be a smi, then it's - // definitely not null. Jump to the false block. + // If the expression is known to be untagged or a smi, then it's definitely + // not null, and it can't be a an undetectable object. + if (instr->hydrogen()->representation().IsSpecialization() || + instr->hydrogen()->type().IsSmi()) { + EmitGoto(false_block); + return; + } int true_block = chunk_->LookupDestination(instr->true_block_id()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - - __ cmp(reg, factory()->null_value()); - if (instr->is_strict()) { + Handle<Object> nil_value = instr->nil() == kNullValue ? + factory()->null_value() : + factory()->undefined_value(); + __ cmp(reg, nil_value); + if (instr->kind() == kStrictEquality) { EmitBranch(true_block, false_block, equal); } else { + Handle<Object> other_nil_value = instr->nil() == kNullValue ? 
+ factory()->undefined_value() : + factory()->null_value(); Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ j(equal, true_label); - __ cmp(reg, factory()->undefined_value()); + __ cmp(reg, other_nil_value); __ j(equal, true_label); __ JumpIfSmi(reg, false_label); // Check for undetectable objects by looking in the bit field in @@ -1745,28 +1802,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. __ JumpIfSmi(input, is_false); - __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); - __ j(below, is_false); - // Map is now in temp. - // Functions have class 'Function'. - __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - __ j(above_equal, is_true); + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); + __ j(below, is_false); + __ j(equal, is_true); + __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); + __ j(equal, is_true); } else { - __ j(above_equal, is_false); + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmpb(Operand(temp2), + static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ j(above, is_false); } + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ mov(temp, FieldOperand(temp, Map::kConstructorOffset)); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - // Objects with a non-function constructor have class 'Object'. __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -1851,9 +1916,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - + virtual LInstruction* instr() { return instr_; } Label* map_check() { return &map_check_; } - private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -1991,6 +2055,17 @@ void LCodeGen::DoReturn(LReturn* instr) { } __ mov(esp, ebp); __ pop(ebp); + if (dynamic_frame_alignment_) { + Label aligned; + // Frame alignment marker (padding) is below arguments, + // and receiver, so its return-address-relative offset is + // (num_arguments + 2) words. 
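DoIsNilAndBranch above generalizes the old null-only test: strict equality matches only the requested nil value, while non-strict equality also accepts the other nil value and undetectable objects. A small sketch of that decision for a non-smi input, using illustrative types:

enum class NilValue { kNull, kUndefined };
enum class EqualityKind { kStrictEquality, kNonStrictEquality };

struct TaggedSketch {
  bool is_null = false;
  bool is_undefined = false;
  bool is_undetectable = false;  // object whose map has the undetectable bit
};

// The branch condition DoIsNilAndBranch computes once smis are ruled out.
bool IsNil(const TaggedSketch& value, EqualityKind kind, NilValue nil) {
  bool is_requested_nil = (nil == NilValue::kNull) ? value.is_null
                                                   : value.is_undefined;
  if (kind == EqualityKind::kStrictEquality) return is_requested_nil;
  // Non-strict: null, undefined, and undetectable objects all compare equal.
  return value.is_null || value.is_undefined || value.is_undetectable;
}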
+ __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), + Immediate(factory()->frame_alignment_marker())); + __ j(not_equal, &aligned); + __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); + __ bind(&aligned); + } __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); } @@ -1998,7 +2073,7 @@ void LCodeGen::DoReturn(LReturn* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); __ mov(result, Operand::Cell(instr->hydrogen()->cell())); - if (instr->hydrogen()->check_hole_value()) { + if (instr->hydrogen()->RequiresHoleCheck()) { __ cmp(result, factory()->the_hole_value()); DeoptimizeIf(equal, instr->environment()); } @@ -2019,20 +2094,34 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { + Register object = ToRegister(instr->TempAt(0)); + Register address = ToRegister(instr->TempAt(1)); Register value = ToRegister(instr->InputAt(0)); - Operand cell_operand = Operand::Cell(instr->hydrogen()->cell()); + ASSERT(!value.is(object)); + Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell()); + + int offset = JSGlobalPropertyCell::kValueOffset; + __ mov(object, Immediate(cell_handle)); // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. We deoptimize in that case. - if (instr->hydrogen()->check_hole_value()) { - __ cmp(cell_operand, factory()->the_hole_value()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(FieldOperand(object, offset), factory()->the_hole_value()); DeoptimizeIf(equal, instr->environment()); } // Store the value. - __ mov(cell_operand, value); + __ mov(FieldOperand(object, offset), value); + + // Cells are always in the remembered set. 
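The dynamic frame alignment added to the prologue pushes one marker word whenever esp is not already double-aligned, and the epilogue above drops the extra word when it finds the marker beyond the arguments. A tiny model of the padding decision, assuming ia32 word and double sizes:

#include <cassert>
#include <cstdint>

constexpr uint32_t kPointerSize = 4;                // ia32 word
constexpr uint32_t kDoubleSize = 2 * kPointerSize;  // spill slot for a double

// Number of marker words the prologue pushes so esp becomes double-aligned.
uint32_t PaddingWords(uint32_t esp) {
  return (esp % kDoubleSize == 0) ? 0 : 1;
}

int main() {
  // Already aligned: no marker, the epilogue returns over (argc + 1) words.
  assert(PaddingWords(0xfff8) == 0);
  // Misaligned by one word: push the marker; the epilogue finds it at
  // (argc + 2) words above the return address and drops it as well.
  assert(PaddingWords(0xfffc) == 1);
  return 0;
}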
+ __ RecordWriteField(object, + offset, + value, + address, + kSaveFPRegs, + OMIT_REMEMBERED_SET); } @@ -2063,7 +2152,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { if (instr->needs_write_barrier()) { Register temp = ToRegister(instr->TempAt(0)); int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWrite(context, offset, value, temp); + __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs); } } @@ -2280,16 +2369,14 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( LLoadKeyedFastDoubleElement* instr) { XMMRegister result = ToDoubleRegister(instr->result()); - if (instr->hydrogen()->RequiresHoleCheck()) { - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), - FAST_DOUBLE_ELEMENTS, - offset); - __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); - } + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), instr->key(), + FAST_DOUBLE_ELEMENTS, + offset); + __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); Operand double_load_operand = BuildFastArrayOperand( instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, @@ -2359,6 +2446,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -2680,6 +2768,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } + virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3005,7 +3094,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); - CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); } @@ -3062,7 +3151,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { Register temp = ToRegister(instr->TempAt(0)); // Update the write barrier for the object for in-object properties. - __ RecordWrite(object, offset, value, temp); + __ RecordWriteField(object, offset, value, temp, kSaveFPRegs); } } else { Register temp = ToRegister(instr->TempAt(0)); @@ -3071,7 +3160,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. // object is used as a scratch register. - __ RecordWrite(temp, offset, value, object); + __ RecordWriteField(temp, offset, value, object, kSaveFPRegs); } } } @@ -3130,6 +3219,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -3146,6 +3236,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register elements = ToRegister(instr->object()); Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; + // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS + // conversion, so it deopts in that case. + if (instr->hydrogen()->ValueNeedsSmiCheck()) { + __ test(value, Immediate(kSmiTagMask)); + DeoptimizeIf(not_zero, instr->environment()); + } + // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3168,7 +3265,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { key, times_pointer_size, FixedArray::kHeaderSize)); - __ RecordWrite(elements, key, value); + __ RecordWrite(elements, key, value, kSaveFPRegs); } } @@ -3212,6 +3309,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3334,6 +3432,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3413,6 +3512,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagI* instr_; }; @@ -3480,6 +3580,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3581,16 +3682,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } -class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - private: - LTaggedToI* instr_; -}; - - void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Label done, heap_number; Register input_reg = ToRegister(instr->InputAt(0)); @@ -3672,6 +3763,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI: public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LTaggedToI* instr_; + }; + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -3882,9 +3983,16 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { - ASSERT(instr->InputAt(0)->IsRegister()); - Operand operand = ToOperand(instr->InputAt(0)); - __ cmp(operand, instr->hydrogen()->target()); + Handle<JSFunction> target = instr->hydrogen()->target(); + if (isolate()->heap()->InNewSpace(*target)) { + Register reg = ToRegister(instr->value()); + Handle<JSGlobalPropertyCell> 
cell = + isolate()->factory()->NewJSGlobalPropertyCell(target); + __ cmp(reg, Operand::Cell(cell)); + } else { + Operand operand = ToOperand(instr->value()); + __ cmp(operand, instr->hydrogen()->target()); + } DeoptimizeIf(not_equal, instr->environment()); } @@ -4188,10 +4296,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = not_zero; } else if (type_name->Equals(heap()->function_symbol())) { - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input); - final_branch_condition = above_equal; + __ CmpObjectType(input, JS_FUNCTION_TYPE, input); + __ j(equal, true_label); + __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); + final_branch_condition = equal; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4303,6 +4413,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index 6156327420..6037c0868a 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED { inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), + dynamic_frame_alignment_(false), deferred_(8), osr_pc_offset_(-1), deoptimization_reloc_size(), @@ -133,6 +134,10 @@ class LCodeGen BASE_EMBEDDED { int strict_mode_flag() const { return info()->is_strict_mode() ? kStrictMode : kNonStrictMode; } + bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; } + void set_dynamic_frame_alignment(bool value) { + dynamic_frame_alignment_ = value; + } LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } @@ -297,6 +302,7 @@ class LCodeGen BASE_EMBEDDED { int inlined_function_count_; Scope* const scope_; Status status_; + bool dynamic_frame_alignment_; TranslationBuffer translations_; ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; @@ -346,16 +352,20 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), external_exit_(NULL) { + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; + virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? 
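DoCheckFunction above stops embedding a new-space function pointer directly in the code; it compares through a global property cell whose slot the GC keeps current if the function moves. A sketch of that indirection with illustrative names:

struct HeapObjectSketch {};

// The cell lives in old space; the GC rewrites value if the target moves.
struct PropertyCellSketch {
  HeapObjectSketch* value;
};

// Equivalent of the emitted cmp reg, Operand::Cell(cell): compare against
// whatever the cell currently points to, never against a raw pointer baked
// into the instruction stream.
bool MatchesTarget(HeapObjectSketch* candidate, const PropertyCellSketch& cell) {
  return candidate == cell.value;
}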
external_exit_ : &exit_; } + int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -366,6 +376,7 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; + int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 3dc220d3d9..856106c799 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -214,10 +214,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNullAndBranch::PrintDataTo(StringStream* stream) { +void LIsNilAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(is_strict() ? " === null" : " == null"); + stream->Add(kind() == kStrictEquality ? " === " : " == "); + stream->Add(nil() == kNullValue ? "null" : "undefined"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -351,7 +352,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { int LChunk::GetNextSpillIndex(bool is_double) { // Skip a slot if for a double-width slot. - if (is_double) spill_slot_count_++; + if (is_double) { + spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even. + spill_slot_count_++; + num_double_slots_++; + } return spill_slot_count_++; } @@ -707,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - instr->set_environment(CreateEnvironment(hydrogen_env)); + int argument_index_accumulator = 0; + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator)); return instr; } @@ -994,10 +1001,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { +LEnvironment* LChunkBuilder::CreateEnvironment( + HEnvironment* hydrogen_env, + int* argument_index_accumulator) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); + LEnvironment* outer = + CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1007,7 +1017,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { argument_count_, value_count, outer); - int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1016,7 +1025,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument(argument_index++); + op = new LArgument((*argument_index_accumulator)++); } else { op = UseAny(value); } @@ -1471,10 +1480,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { // We only need a temp register for non-strict compare. - LOperand* temp = instr->is_strict() ? NULL : TempRegister(); - return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp); + LOperand* temp = instr->kind() == kStrictEquality ? 
NULL : TempRegister(); + return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp); } @@ -1683,7 +1692,13 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { - LOperand* value = UseAtStart(instr->value()); + // If the target is in new space, we'll emit a global cell compare and so + // want the value in a register. If the target gets promoted before we + // emit code, we will still get the register but will do an immediate + // compare instead of the cell compare. This is safe. + LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target()) + ? UseRegisterAtStart(instr->value()) + : UseAtStart(instr->value()); return AssignEnvironment(new LCheckFunction(value)); } @@ -1770,7 +1785,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->check_hole_value() + return instr->RequiresHoleCheck() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1786,8 +1801,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LStoreGlobalCell* result = - new LStoreGlobalCell(UseRegisterAtStart(instr->value())); - return instr->check_hole_value() ? AssignEnvironment(result) : result; + new LStoreGlobalCell(UseTempRegister(instr->value()), + TempRegister(), + TempRegister()); + return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result; } @@ -1808,15 +1825,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; LOperand* value; LOperand* temp; + LOperand* context = UseRegister(instr->context()); if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); value = UseTempRegister(instr->value()); temp = TempRegister(); } else { - context = UseRegister(instr->context()); value = UseRegister(instr->value()); temp = NULL; } @@ -1944,7 +1959,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( ASSERT(instr->object()->representation().IsTagged()); ASSERT(instr->key()->representation().IsInteger32()); - LOperand* obj = UseTempRegister(instr->object()); + LOperand* obj = UseRegister(instr->object()); LOperand* val = needs_write_barrier ? UseTempRegister(instr->value()) : UseRegisterAtStart(instr->value()); @@ -2021,9 +2036,14 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* obj = needs_write_barrier - ? UseTempRegister(instr->object()) - : UseRegisterAtStart(instr->object()); + LOperand* obj; + if (needs_write_barrier) { + obj = instr->is_in_object() + ? UseRegister(instr->object()) + : UseTempRegister(instr->object()); + } else { + obj = UseRegisterAtStart(instr->object()); + } LOperand* val = needs_write_barrier ? 
UseTempRegister(instr->value()) diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 038049ca06..3a06ac358b 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -101,7 +101,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNullAndBranch) \ + V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -615,17 +615,18 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNullAndBranch: public LControlInstruction<1, 1> { +class LIsNilAndBranch: public LControlInstruction<1, 1> { public: - LIsNullAndBranch(LOperand* value, LOperand* temp) { + LIsNilAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) - bool is_strict() const { return hydrogen()->is_strict(); } + EqualityKind kind() const { return hydrogen()->kind(); } + NilValue nil() const { return hydrogen()->nil(); } virtual void PrintDataTo(StringStream* stream); }; @@ -1230,10 +1231,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> { }; -class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> { +class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> { public: - explicit LStoreGlobalCell(LOperand* value) { + explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) { inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") @@ -1798,6 +1801,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function") DECLARE_HYDROGEN_ACCESSOR(CheckFunction) }; @@ -2070,6 +2075,7 @@ class LChunk: public ZoneObject { graph_(graph), instructions_(32), pointer_maps_(8), + num_double_slots_(0), inlined_closures_(1) { } void AddInstruction(LInstruction* instruction, HBasicBlock* block); @@ -2083,6 +2089,8 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } + int num_double_slots() const { return num_double_slots_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } @@ -2124,6 +2132,7 @@ class LChunk: public ZoneObject { HGraph* const graph_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; + int num_double_slots_; ZoneList<Handle<JSFunction> > inlined_closures_; }; @@ -2259,7 +2268,8 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, + int* argument_index_accumulator); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 837112a55c..3aaa22acc9 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -44,7 +44,8 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* 
arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), - allow_stub_calls_(true) { + allow_stub_calls_(true), + has_frame_(false) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), isolate()); @@ -52,33 +53,75 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) } -void MacroAssembler::RecordWriteHelper(Register object, - Register addr, - Register scratch) { - if (emit_debug_code()) { - // Check that the object is not in new space. - Label not_in_new_space; - InNewSpace(object, scratch, not_equal, ¬_in_new_space); - Abort("new-space object passed to RecordWriteHelper"); - bind(¬_in_new_space); +void MacroAssembler::InNewSpace( + Register object, + Register scratch, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + ASSERT(cc == equal || cc == not_equal); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + mov(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); } + // Check that we can use a test_b. + ASSERT(MemoryChunk::IN_FROM_SPACE < 8); + ASSERT(MemoryChunk::IN_TO_SPACE < 8); + int mask = (1 << MemoryChunk::IN_FROM_SPACE) + | (1 << MemoryChunk::IN_TO_SPACE); + // If non-zero, the page belongs to new-space. + test_b(Operand(scratch, MemoryChunk::kFlagsOffset), + static_cast<uint8_t>(mask)); + j(cc, condition_met, condition_met_distance); +} - // Compute the page start address from the heap object pointer, and reuse - // the 'object' register for it. - and_(object, ~Page::kPageAlignmentMask); - - // Compute number of region covering addr. See Page::GetRegionNumberForAddress - // method for more details. - shr(addr, Page::kRegionSizeLog2); - and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2); - // Set dirty mark for region. - // Bit tests with a memory operand should be avoided on Intel processors, - // as they usually have long latency and multiple uops. We load the bit base - // operand to a register at first and store it back after bit set. - mov(scratch, Operand(object, Page::kDirtyFlagOffset)); - bts(Operand(scratch), addr); - mov(Operand(object, Page::kDirtyFlagOffset), scratch); +void MacroAssembler::RememberedSetHelper( + Register object, // Only used for debug checks. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + MacroAssembler::RememberedSetFinalAction and_then) { + Label done; + if (FLAG_debug_code) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); + int3(); + bind(&ok); + } + // Load store buffer top. + ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + mov(scratch, Operand::StaticVariable(store_buffer)); + // Store pointer to buffer. + mov(Operand(scratch, 0), addr); + // Increment buffer top. + add(scratch, Immediate(kPointerSize)); + // Write back new top of buffer. + mov(Operand::StaticVariable(store_buffer), scratch); + // Call stub on end of buffer. + // Check for end of buffer. 
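The rewritten InNewSpace no longer compares against the new-space start address; it masks the object down to its page start and tests the from-space/to-space bits in the page header. A self-contained sketch of that test, with an assumed page size and flag layout:

#include <cstdint>

constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;  // illustrative page size
constexpr uint32_t kInFromSpaceBit = 1u << 3;                       // illustrative bit positions
constexpr uint32_t kInToSpaceBit = 1u << 4;

struct PageHeaderSketch {
  uint32_t flags;  // MemoryChunk-style flag word at the start of the page
};

bool InNewSpaceSketch(uintptr_t object_address) {
  // Mask the address down to the page start, then test the new-space bits.
  auto* page = reinterpret_cast<const PageHeaderSketch*>(
      object_address & ~kPageAlignmentMask);
  return (page->flags & (kInFromSpaceBit | kInToSpaceBit)) != 0;
}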
+ test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kReturnAtEnd) { + Label buffer_overflowed; + j(not_equal, &buffer_overflowed, Label::kNear); + ret(0); + bind(&buffer_overflowed); + } else { + ASSERT(and_then == kFallThroughAtEnd); + j(equal, &done, Label::kNear); + } + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(save_fp); + CallStub(&store_buffer_overflow); + if (and_then == kReturnAtEnd) { + ret(0); + } else { + ASSERT(and_then == kFallThroughAtEnd); + bind(&done); + } } @@ -112,100 +155,144 @@ void MacroAssembler::ClampUint8(Register reg) { } -void MacroAssembler::InNewSpace(Register object, - Register scratch, - Condition cc, - Label* branch, - Label::Distance branch_near) { - ASSERT(cc == equal || cc == not_equal); - if (Serializer::enabled()) { - // Can't do arithmetic on external references if it might get serialized. - mov(scratch, Operand(object)); - // The mask isn't really an address. We load it as an external reference in - // case the size of the new space is different between the snapshot maker - // and the running system. - and_(Operand(scratch), - Immediate(ExternalReference::new_space_mask(isolate()))); - cmp(Operand(scratch), - Immediate(ExternalReference::new_space_start(isolate()))); - j(cc, branch, branch_near); - } else { - int32_t new_space_start = reinterpret_cast<int32_t>( - ExternalReference::new_space_start(isolate()).address()); - lea(scratch, Operand(object, -new_space_start)); - and_(scratch, isolate()->heap()->NewSpaceMask()); - j(cc, branch, branch_near); +void MacroAssembler::RecordWriteArray(Register object, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. + Label done; + + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + ASSERT_EQ(0, kSmiTag); + test(value, Immediate(kSmiTagMask)); + j(zero, &done); + } + + // Array access: calculate the destination address in the same manner as + // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset + // into an array of words. + Register dst = index; + lea(dst, Operand(object, index, times_half_pointer_size, + FixedArray::kHeaderSize - kHeapObjectTag)); + + RecordWrite( + object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + + bind(&done); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + mov(value, Immediate(BitCast<int32_t>(kZapValue))); + mov(index, Immediate(BitCast<int32_t>(kZapValue))); } } -void MacroAssembler::RecordWrite(Register object, - int offset, - Register value, - Register scratch) { +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { // First, check if a write barrier is even needed. The tests below - // catch stores of Smis and stores into young gen. + // catch stores of Smis. Label done; // Skip barrier if writing a smi. 
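RememberedSetHelper above appends the slot address to the store buffer, bumps the top pointer, and falls into a slow path when the buffer is about to overflow. The same bookkeeping in a stand-alone sketch (capacity and the overflow condition are illustrative, not the real StoreBuffer layout):

#include <cstddef>
#include <cstdint>

constexpr std::size_t kStoreBufferSlots = 1024;  // illustrative capacity

struct StoreBufferSketch {
  uintptr_t slots[kStoreBufferSlots];
  std::size_t top = 0;

  // Record one slot address that now holds an interesting pointer; when the
  // buffer fills up, hand off to the slow path (the overflow stub analogue).
  void Record(uintptr_t slot_address,
              void (*overflow_slow_path)(StoreBufferSketch*)) {
    slots[top++] = slot_address;  // store pointer to buffer, increment top
    if (top == kStoreBufferSlots) {
      overflow_slow_path(this);   // drains or compacts the buffer
      top = 0;
    }
  }
};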
- STATIC_ASSERT(kSmiTag == 0); - JumpIfSmi(value, &done, Label::kNear); - - InNewSpace(object, value, equal, &done, Label::kNear); + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done, Label::kNear); + } - // The offset is relative to a tagged or untagged HeapObject pointer, - // so either offset or offset + kHeapObjectTag must be a - // multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize) || - IsAligned(offset + kHeapObjectTag, kPointerSize)); + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize)); - Register dst = scratch; - if (offset != 0) { - lea(dst, Operand(object, offset)); - } else { - // Array access: calculate the destination address in the same manner as - // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset - // into an array of words. - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - lea(dst, Operand(object, dst, times_half_pointer_size, - FixedArray::kHeaderSize - kHeapObjectTag)); + lea(dst, FieldOperand(object, offset)); + if (emit_debug_code()) { + Label ok; + test_b(dst, (1 << kPointerSizeLog2) - 1); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); } - RecordWriteHelper(object, dst, value); + + RecordWrite( + object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(object, Immediate(BitCast<int32_t>(kZapValue))); mov(value, Immediate(BitCast<int32_t>(kZapValue))); - mov(scratch, Immediate(BitCast<int32_t>(kZapValue))); + mov(dst, Immediate(BitCast<int32_t>(kZapValue))); } } void MacroAssembler::RecordWrite(Register object, Register address, - Register value) { + Register value, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + ASSERT(!object.is(value)); + ASSERT(!object.is(address)); + ASSERT(!value.is(address)); + if (emit_debug_code()) { + AbortIfSmi(object); + } + + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } + + if (FLAG_debug_code) { + Label ok; + cmp(value, Operand(address, 0)); + j(equal, &ok, Label::kNear); + int3(); + bind(&ok); + } + // First, check if a write barrier is even needed. The tests below // catch stores of Smis and stores into young gen. Label done; - // Skip barrier if writing a smi. - STATIC_ASSERT(kSmiTag == 0); - JumpIfSmi(value, &done, Label::kNear); - - InNewSpace(object, value, equal, &done); - - RecordWriteHelper(object, address, value); + if (smi_check == INLINE_SMI_CHECK) { + // Skip barrier if writing a smi. + JumpIfSmi(value, &done, Label::kNear); + } + + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + zero, + &done, + Label::kNear); + + RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + CallStub(&stub); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. 
if (emit_debug_code()) { - mov(object, Immediate(BitCast<int32_t>(kZapValue))); mov(address, Immediate(BitCast<int32_t>(kZapValue))); mov(value, Immediate(BitCast<int32_t>(kZapValue))); } @@ -224,7 +311,7 @@ void MacroAssembler::DebugBreak() { void MacroAssembler::Set(Register dst, const Immediate& x) { if (x.is_zero()) { - xor_(dst, Operand(dst)); // Shorter than mov. + xor_(dst, dst); // Shorter than mov. } else { mov(dst, x); } @@ -287,13 +374,111 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::CheckFastElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_ELEMENTS == 0); + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastSmiOnlyElementValue); + j(below_equal, fail, distance); cmpb(FieldOperand(map, Map::kBitField2Offset), Map::kMaximumBitField2FastElementValue); j(above, fail, distance); } +void MacroAssembler::CheckFastSmiOnlyElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastSmiOnlyElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::StoreNumberToDoubleElements( + Register maybe_number, + Register elements, + Register key, + Register scratch1, + XMMRegister scratch2, + Label* fail, + bool specialize_for_processor) { + Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; + JumpIfSmi(maybe_number, &smi_value, Label::kNear); + + CheckMap(maybe_number, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Double value, canonicalize NaN. + uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); + cmp(FieldOperand(maybe_number, offset), + Immediate(kNaNOrInfinityLowerBoundUpper32)); + j(greater_equal, &maybe_nan, Label::kNear); + + bind(¬_nan); + ExternalReference canonical_nan_reference = + ExternalReference::address_of_canonical_non_hole_nan(); + if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { + CpuFeatures::Scope use_sse2(SSE2); + movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), + scratch2); + } else { + fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize)); + } + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + j(greater, &is_nan, Label::kNear); + cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); + j(zero, ¬_nan); + bind(&is_nan); + if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { + CpuFeatures::Scope use_sse2(SSE2); + movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); + } else { + fld_d(Operand::StaticVariable(canonical_nan_reference)); + } + jmp(&have_double_value, Label::kNear); + + bind(&smi_value); + // Value is a smi. 
Convert to a double and store. + // Preserve original value. + mov(scratch1, maybe_number); + SmiUntag(scratch1); + if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { + CpuFeatures::Scope fscope(SSE2); + cvtsi2sd(scratch2, scratch1); + movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), + scratch2); + } else { + push(scratch1); + fild_s(Operand(esp, 0)); + pop(scratch1); + fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize)); + } + bind(&done); +} + + void MacroAssembler::CheckMap(Register obj, Handle<Map> map, Label* fail, @@ -345,7 +530,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch, Label* fail) { movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset)); - sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); cmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); j(above, fail); @@ -402,7 +587,7 @@ void MacroAssembler::AbortIfSmi(Register object) { void MacroAssembler::EnterFrame(StackFrame::Type type) { push(ebp); - mov(ebp, Operand(esp)); + mov(ebp, esp); push(esi); push(Immediate(Smi::FromInt(type))); push(Immediate(CodeObject())); @@ -429,7 +614,7 @@ void MacroAssembler::EnterExitFramePrologue() { ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); push(ebp); - mov(ebp, Operand(esp)); + mov(ebp, esp); // Reserve room for entry stack pointer and push the code object. ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); @@ -451,14 +636,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { if (save_doubles) { CpuFeatures::Scope scope(SSE2); int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; - sub(Operand(esp), Immediate(space)); + sub(esp, Immediate(space)); const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); } } else { - sub(Operand(esp), Immediate(argc * kPointerSize)); + sub(esp, Immediate(argc * kPointerSize)); } // Get the required frame alignment for the OS. @@ -478,7 +663,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { // Setup argc and argv in callee-saved registers. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; - mov(edi, Operand(eax)); + mov(edi, eax); lea(esi, Operand(ebp, eax, times_4, offset)); // Reserve space for argc, argv and isolate. @@ -532,7 +717,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() { void MacroAssembler::LeaveApiExitFrame() { - mov(esp, Operand(ebp)); + mov(esp, ebp); pop(ebp); LeaveExitFrameEpilogue(); @@ -580,7 +765,7 @@ void MacroAssembler::PopTryHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress, isolate()))); - add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize)); + add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize)); } @@ -612,7 +797,7 @@ void MacroAssembler::Throw(Register value) { // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any // of them. 
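StoreNumberToDoubleElements canonicalizes NaNs before writing into a FixedDoubleArray so that no stored value can collide with the hole NaN the array reserves internally; smis are simply untagged and converted. The rule in isolation, as a sketch with an assumed quiet-NaN bit pattern:

#include <cmath>
#include <cstdint>
#include <cstring>

// Assumed canonical quiet-NaN payload; the real constant lives in the runtime.
constexpr uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;

double CanonicalizeForDoubleArray(double value) {
  if (std::isnan(value)) {
    double canonical;
    std::memcpy(&canonical, &kCanonicalNaNBits, sizeof(canonical));
    return canonical;  // every NaN is stored with one fixed bit pattern
  }
  return value;  // ordinary numbers and infinities are stored unchanged
}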
Label skip; - cmp(Operand(edx), Immediate(StackHandler::ENTRY)); + cmp(edx, Immediate(StackHandler::ENTRY)); j(equal, &skip, Label::kNear); mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); bind(&skip); @@ -696,7 +881,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, // When generating debug code, make sure the lexical context is set. if (emit_debug_code()) { - cmp(Operand(scratch), Immediate(0)); + cmp(scratch, Immediate(0)); Check(not_equal, "we should not have an empty lexical context"); } // Load the global context of the current context. @@ -784,23 +969,23 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r1, r0); not_(r0); shl(r1, 15); - add(r0, Operand(r1)); + add(r0, r1); // hash = hash ^ (hash >> 12); mov(r1, r0); shr(r1, 12); - xor_(r0, Operand(r1)); + xor_(r0, r1); // hash = hash + (hash << 2); lea(r0, Operand(r0, r0, times_4, 0)); // hash = hash ^ (hash >> 4); mov(r1, r0); shr(r1, 4); - xor_(r0, Operand(r1)); + xor_(r0, r1); // hash = hash * 2057; imul(r0, r0, 2057); // hash = hash ^ (hash >> 16); mov(r1, r0); shr(r1, 16); - xor_(r0, Operand(r1)); + xor_(r0, r1); // Compute capacity mask. mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset)); @@ -814,9 +999,9 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r2, r0); // Compute the masked index: (hash + i + i * i) & mask. if (i > 0) { - add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i))); + add(r2, Immediate(NumberDictionary::GetProbeOffset(i))); } - and_(r2, Operand(r1)); + and_(r2, r1); // Scale the index by multiplying by the entry size. ASSERT(NumberDictionary::kEntrySize == 3); @@ -872,7 +1057,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, if (scratch.is(no_reg)) { mov(result, Operand::StaticVariable(new_space_allocation_top)); } else { - mov(Operand(scratch), Immediate(new_space_allocation_top)); + mov(scratch, Immediate(new_space_allocation_top)); mov(result, Operand(scratch, 0)); } } @@ -931,7 +1116,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size, if (!top_reg.is(result)) { mov(top_reg, result); } - add(Operand(top_reg), Immediate(object_size)); + add(top_reg, Immediate(object_size)); j(carry, gc_required); cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -942,12 +1127,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Tag result if requested. if (top_reg.is(result)) { if ((flags & TAG_OBJECT) != 0) { - sub(Operand(result), Immediate(object_size - kHeapObjectTag)); + sub(result, Immediate(object_size - kHeapObjectTag)); } else { - sub(Operand(result), Immediate(object_size)); + sub(result, Immediate(object_size)); } } else if ((flags & TAG_OBJECT) != 0) { - add(Operand(result), Immediate(kHeapObjectTag)); + add(result, Immediate(kHeapObjectTag)); } } @@ -985,7 +1170,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size, // We assume that element_count*element_size + header_size does not // overflow. 
lea(result_end, Operand(element_count, element_size, header_size)); - add(result_end, Operand(result)); + add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1030,7 +1215,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, if (!object_size.is(result_end)) { mov(result_end, object_size); } - add(result_end, Operand(result)); + add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1050,7 +1235,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) { ExternalReference::new_space_allocation_top_address(isolate()); // Make sure the object has no tag before resetting top. - and_(Operand(object), Immediate(~kHeapObjectTagMask)); + and_(object, Immediate(~kHeapObjectTagMask)); #ifdef DEBUG cmp(object, Operand::StaticVariable(new_space_allocation_top)); Check(below, "Undo allocation of non allocated memory"); @@ -1089,7 +1274,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, ASSERT(kShortSize == 2); // scratch1 = length * 2 + kObjectAlignmentMask. lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); - and_(Operand(scratch1), Immediate(~kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate two byte string in new space. AllocateInNewSpace(SeqTwoByteString::kHeaderSize, @@ -1123,8 +1308,8 @@ void MacroAssembler::AllocateAsciiString(Register result, ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); mov(scratch1, length); ASSERT(kCharSize == 1); - add(Operand(scratch1), Immediate(kObjectAlignmentMask)); - and_(Operand(scratch1), Immediate(~kObjectAlignmentMask)); + add(scratch1, Immediate(kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate ascii string in new space. AllocateInNewSpace(SeqAsciiString::kHeaderSize, @@ -1258,7 +1443,7 @@ void MacroAssembler::CopyBytes(Register source, Register scratch) { Label loop, done, short_string, short_loop; // Experimentation shows that the short string loop is faster if length < 10. 
- cmp(Operand(length), Immediate(10)); + cmp(length, Immediate(10)); j(less_equal, &short_string); ASSERT(source.is(esi)); @@ -1273,12 +1458,12 @@ void MacroAssembler::CopyBytes(Register source, mov(scratch, ecx); shr(ecx, 2); rep_movs(); - and_(Operand(scratch), Immediate(0x3)); - add(destination, Operand(scratch)); + and_(scratch, Immediate(0x3)); + add(destination, scratch); jmp(&done); bind(&short_string); - test(length, Operand(length)); + test(length, length); j(zero, &done); bind(&short_loop); @@ -1293,13 +1478,27 @@ void MacroAssembler::CopyBytes(Register source, } +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + jmp(&entry); + bind(&loop); + mov(Operand(start_offset, 0), filler); + add(start_offset, Immediate(kPointerSize)); + bind(&entry); + cmp(start_offset, end_offset); + j(less, &loop); +} + + void MacroAssembler::NegativeZeroTest(Register result, Register op, Label* then_label) { Label ok; - test(result, Operand(result)); + test(result, result); j(not_zero, &ok); - test(op, Operand(op)); + test(op, op); j(sign, then_label); bind(&ok); } @@ -1311,10 +1510,10 @@ void MacroAssembler::NegativeZeroTest(Register result, Register scratch, Label* then_label) { Label ok; - test(result, Operand(result)); + test(result, result); j(not_zero, &ok); - mov(scratch, Operand(op1)); - or_(scratch, Operand(op2)); + mov(scratch, op1); + or_(scratch, op2); j(sign, then_label); bind(&ok); } @@ -1344,7 +1543,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, // If the prototype or initial map is the hole, don't return it and // simply miss the cache instead. This will allow us to allocate a // prototype object on-demand in the runtime system. - cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value())); + cmp(result, Immediate(isolate()->factory()->the_hole_value())); j(equal, miss); // If the function does not have an initial map, we're done. @@ -1367,13 +1566,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1384,13 +1583,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); jmp(stub->GetCode(), RelocInfo::CODE_TARGET); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. 
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1406,9 +1604,15 @@ void MacroAssembler::StubReturn(int argc) { } +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); +} + + void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { - add(Operand(esp), Immediate(num_arguments * kPointerSize)); + add(esp, Immediate(num_arguments * kPointerSize)); } mov(eax, Immediate(isolate()->factory()->undefined_value())); } @@ -1442,8 +1646,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); Set(eax, Immediate(function->nargs)); mov(ebx, Immediate(ExternalReference(function, isolate()))); - CEntryStub ces(1); - ces.SaveDoubles(); + CEntryStub ces(1, kSaveFPRegs); CallStub(&ces); } @@ -1623,7 +1826,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function, Label leave_exit_frame; // Check if the result handle holds 0. - test(eax, Operand(eax)); + test(eax, eax); j(zero, &empty_handle); // It was non-zero. Dereference to get the result value. mov(eax, Operand(eax, 0)); @@ -1664,7 +1867,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function, mov(edi, eax); mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address())); mov(eax, Immediate(delete_extensions)); - call(Operand(eax)); + call(eax); mov(eax, edi); jmp(&leave_exit_frame); @@ -1698,10 +1901,10 @@ void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { if (call_kind == CALL_AS_FUNCTION) { // Set to some non-zero smi by updating the least significant // byte. - mov_b(Operand(dst), 1 << kSmiTagSize); + mov_b(dst, 1 << kSmiTagSize); } else { // Set to smi zero by clearing the register. - xor_(dst, Operand(dst)); + xor_(dst, dst); } } @@ -1746,7 +1949,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } else if (!expected.reg().is(actual.reg())) { // Both expected and actual are in (different) registers. This // is the case when we invoke functions using call and apply. - cmp(expected.reg(), Operand(actual.reg())); + cmp(expected.reg(), actual.reg()); j(equal, &invoke); ASSERT(actual.reg().is(eax)); ASSERT(expected.reg().is(ebx)); @@ -1758,7 +1961,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, isolate()->builtins()->ArgumentsAdaptorTrampoline(); if (!code_constant.is_null()) { mov(edx, Immediate(code_constant)); - add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); + add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); } else if (!code_operand.is_reg(edx)) { mov(edx, code_operand); } @@ -1784,6 +1987,9 @@ void MacroAssembler::InvokeCode(const Operand& code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, Label::kNear, call_wrapper, @@ -1809,8 +2015,11 @@ void MacroAssembler::InvokeCode(Handle<Code> code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. 
+ ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; - Operand dummy(eax); + Operand dummy(eax, 0); InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear, call_wrapper, call_kind); if (flag == CALL_FUNCTION) { @@ -1832,6 +2041,9 @@ void MacroAssembler::InvokeFunction(Register fun, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(fun.is(edi)); mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); @@ -1849,6 +2061,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function->is_compiled()); // Get the function and setup the context. mov(edi, Immediate(Handle<JSFunction>(function))); @@ -1872,8 +2087,8 @@ void MacroAssembler::InvokeFunction(JSFunction* function, void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { - // Calls are not allowed in some stubs. - ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); + // You can't call a builtin without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); // Rely on the assertion to check that the number of provided // arguments match the expected number of arguments. Fake a @@ -1884,6 +2099,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, expected, expected, flag, call_wrapper, CALL_AS_METHOD); } + void MacroAssembler::GetBuiltinFunction(Register target, Builtins::JavaScript id) { // Load the JavaScript builtin function from the builtins object. @@ -1893,6 +2109,7 @@ void MacroAssembler::GetBuiltinFunction(Register target, JSBuiltinsObject::OffsetOfFunctionWithId(id))); } + void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { ASSERT(!target.is(edi)); // Load the JavaScript builtin function from the builtins object. @@ -1994,7 +2211,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { ret(bytes_dropped); } else { pop(scratch); - add(Operand(esp), Immediate(bytes_dropped)); + add(esp, Immediate(bytes_dropped)); push(scratch); ret(0); } @@ -2005,7 +2222,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { void MacroAssembler::Drop(int stack_elements) { if (stack_elements > 0) { - add(Operand(esp), Immediate(stack_elements * kPointerSize)); + add(esp, Immediate(stack_elements * kPointerSize)); } } @@ -2148,13 +2365,19 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif - // Disable stub call restrictions to always allow calls to abort. - AllowStubCallsScope allow_scope(this, true); push(eax); push(Immediate(p0)); push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)))); - CallRuntime(Runtime::kAbort, 2); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. 
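+ // A FrameScope of type NONE only marks has_frame() as true and emits no
+ // frame setup code, which is enough to satisfy the frame assertion behind
+ // CallRuntime below.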
+ FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 2); + } else { + CallRuntime(Runtime::kAbort, 2); + } // will not return here int3(); } @@ -2177,7 +2400,7 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst, ASSERT(is_uintn(power + HeapNumber::kExponentBias, HeapNumber::kExponentBits)); mov(scratch, Immediate(power + HeapNumber::kExponentBias)); - movd(dst, Operand(scratch)); + movd(dst, scratch); psllq(dst, HeapNumber::kMantissaBits); } @@ -2203,8 +2426,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1, Label* failure) { // Check that both objects are not smis. STATIC_ASSERT(kSmiTag == 0); - mov(scratch1, Operand(object1)); - and_(scratch1, Operand(object2)); + mov(scratch1, object1); + and_(scratch1, object2); JumpIfSmi(scratch1, failure); // Load instance type for both strings. @@ -2233,12 +2456,12 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { // Make stack end at alignment and make room for num_arguments words // and the original value of esp. mov(scratch, esp); - sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize)); + sub(esp, Immediate((num_arguments + 1) * kPointerSize)); ASSERT(IsPowerOf2(frame_alignment)); and_(esp, -frame_alignment); mov(Operand(esp, num_arguments * kPointerSize), scratch); } else { - sub(Operand(esp), Immediate(num_arguments * kPointerSize)); + sub(esp, Immediate(num_arguments * kPointerSize)); } } @@ -2246,27 +2469,39 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { // Trashing eax is ok as it will be the return value. - mov(Operand(eax), Immediate(function)); + mov(eax, Immediate(function)); CallCFunction(eax, num_arguments); } void MacroAssembler::CallCFunction(Register function, int num_arguments) { + ASSERT(has_frame()); // Check stack alignment. 
if (emit_debug_code()) { CheckStackAlignment(); } - call(Operand(function)); + call(function); if (OS::ActivationFrameAlignment() != 0) { mov(esp, Operand(esp, num_arguments * kPointerSize)); } else { - add(Operand(esp), Immediate(num_arguments * kPointerSize)); + add(esp, Immediate(num_arguments * kPointerSize)); } } +bool AreAliased(Register r1, Register r2, Register r3, Register r4) { + if (r1.is(r2)) return true; + if (r1.is(r3)) return true; + if (r1.is(r4)) return true; + if (r2.is(r3)) return true; + if (r2.is(r4)) return true; + if (r3.is(r4)) return true; + return false; +} + + CodePatcher::CodePatcher(byte* address, int size) : address_(address), size_(size), @@ -2288,6 +2523,198 @@ CodePatcher::~CodePatcher() { } +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + ASSERT(cc == zero || cc == not_zero); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + mov(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); + } + if (mask < (1 << kBitsPerByte)) { + test_b(Operand(scratch, MemoryChunk::kFlagsOffset), + static_cast<uint8_t>(mask)); + } else { + test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); + } + j(cc, condition_met, condition_met_distance); +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_near) { + HasColor(object, scratch0, scratch1, + on_black, on_black_near, + 1, 0); // kBlackBitPattern. + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + Label::Distance has_color_distance, + int first_bit, + int second_bit) { + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color, word_boundary; + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); + add(mask_scratch, mask_scratch); // Shift left 1 by adding. + j(zero, &word_boundary, Label::kNear); + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); + jmp(&other_color, Label::kNear); + + bind(&word_boundary); + test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1); + + j(second_bit == 1 ? 
not_zero : zero, has_color, has_color_distance);
+ bind(&other_color);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+ mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ and_(bitmap_reg, addr_reg);
+ mov(ecx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shr(ecx, shift);
+ and_(ecx,
+ (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
+ add(bitmap_reg, ecx);
+ mov(ecx, addr_reg);
+ shr(ecx, kPointerSizeLog2);
+ and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+ mov(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(not_zero, &done, Label::kNear);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+ // shl. May overflow making the check conservative.
+ add(mask_scratch, mask_scratch);
+ test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = ecx; // Holds map while checking type.
+ Register length = ecx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ mov(map, FieldOperand(value, HeapObject::kMapOffset));
+ cmp(map, FACTORY->heap_number_map());
+ j(not_equal, &not_heap_number, Label::kNear);
+ mov(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = ecx;
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
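+ // The two asserts below guarantee that, so a single bit test is enough to
+ // separate external strings (whose size is just ExternalString::kSize) from
+ // sequential strings, whose size is computed from length and encoding.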
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ test_b(instance_type, kExternalStringTag);
+ j(zero, &not_external, Label::kNear);
+ mov(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kAsciiStringTag == 0x04);
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ add(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+ // by 2. If we multiply the string length as smi by this, it still
+ // won't overflow a 32-bit value.
+ ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqAsciiString::kMaxSize <=
+ static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+ add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+ length);
+ if (FLAG_debug_code) {
+ mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+ Check(less_equal, "Live Bytes Count overflow chunk size");
+ }
+
+ bind(&done);
+}
+
 } } // namespace v8::internal
 #endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 1906644c35..a1b42c280c 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 namespace v8 {
@@ -50,6 +51,13 @@ enum AllocationFlags {
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
 public:
@@ -61,42 +69,130 @@ class MacroAssembler: public Assembler {
 // ---------------------------------------------------------------------------
 // GC Support
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
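+ // (Both this helper and JumpIfInNewSpace below are thin wrappers around
+ // the private InNewSpace() helper.)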
+ void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, zero, branch, distance); + } - // For page containing |object| mark region covering |addr| dirty. - // RecordWriteHelper only works if the object is not in new - // space. - void RecordWriteHelper(Register object, - Register addr, - Register scratch); + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, not_zero, branch, distance); + } - // Check if object is in new space. - // scratch can be object itself, but it will be clobbered. - void InNewSpace(Register object, - Register scratch, - Condition cc, // equal for new space, not_equal otherwise. - Label* branch, - Label::Distance branch_near = Label::kFar); + // Check if an object has a given incremental marking color. Also uses ecx! + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + Label::Distance has_color_distance, + int first_bit, + int second_bit); + + void JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_distance = Label::kFar); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Label* object_is_white_and_not_data, + Label::Distance distance); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // As above, but the offset has the tag presubtracted. For use with + // Operand(reg, off). + void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + save_fp, + remembered_set_action, + smi_check); + } - // For page containing |object| mark region covering [object+offset] - // dirty. |object| is the object being stored into, |value| is the - // object being stored. If offset is zero, then the scratch register - // contains the array index into the elements array represented as a - // Smi. All registers are clobbered by the operation. RecordWrite + // Notify the garbage collector that we wrote a pointer into a fixed array. + // |array| is the array being stored into, |value| is the + // object being stored. 
|index| is the array index represented as a + // Smi. All registers are clobbered by the operation RecordWriteArray // filters out smis so it does not update the write barrier if the // value is a smi. - void RecordWrite(Register object, - int offset, - Register value, - Register scratch); + void RecordWriteArray( + Register array, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); // For page containing |object| mark region covering |address| // dirty. |object| is the object being stored into, |value| is the - // object being stored. All registers are clobbered by the + // object being stored. The address and value registers are clobbered by the // operation. RecordWrite filters out smis so it does not update the // write barrier if the value is a smi. - void RecordWrite(Register object, - Register address, - Register value); + void RecordWrite( + Register object, + Register address, + Register value, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); #ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- @@ -105,15 +201,6 @@ class MacroAssembler: public Assembler { void DebugBreak(); #endif - // --------------------------------------------------------------------------- - // Activation frames - - void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } - void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } - - void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } - void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame. Expects the number of // arguments in register eax and sets up the number of arguments in // register edi and the pointer to the first argument in register @@ -159,6 +246,15 @@ class MacroAssembler: public Assembler { void SetCallKind(Register dst, CallKind kind); // Invoke the JavaScript function code by either calling or jumping. + void InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper, + CallKind call_kind) { + InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind); + } + void InvokeCode(const Operand& code, const ParameterCount& expected, const ParameterCount& actual, @@ -225,6 +321,29 @@ class MacroAssembler: public Assembler { Label* fail, Label::Distance distance = Label::kFar); + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. Jump to the specified label if it does not. + void CheckFastSmiOnlyElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. 
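+ // |maybe_number| may be a smi or a heap number; any other value takes the
+ // jump to fail.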
+ void StoreNumberToDoubleElements(Register maybe_number, + Register elements, + Register key, + Register scratch1, + XMMRegister scratch2, + Label* fail, + bool specialize_for_processor); + // Check if the map of an object is equal to a specified map and branch to // label if not. Skip the smi check if not required (object is known to be a // heap object) @@ -277,7 +396,7 @@ class MacroAssembler: public Assembler { void SmiTag(Register reg) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize == 1); - add(reg, Operand(reg)); + add(reg, reg); } void SmiUntag(Register reg) { sar(reg, kSmiTagSize); @@ -465,6 +584,13 @@ class MacroAssembler: public Assembler { Register length, Register scratch); + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + // --------------------------------------------------------------------------- // Support functions. @@ -667,6 +793,9 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); // --------------------------------------------------------------------------- // String utilities. @@ -690,9 +819,14 @@ class MacroAssembler: public Assembler { return SafepointRegisterStackIndex(reg.code()); } + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + private: bool generating_stub_; bool allow_stub_calls_; + bool has_frame_; // This handle will be patched with the code object on installation. Handle<Object> code_object_; @@ -703,14 +837,10 @@ class MacroAssembler: public Assembler { const Operand& code_operand, Label* done, InvokeFlag flag, - Label::Distance done_near = Label::kFar, + Label::Distance done_distance, const CallWrapper& call_wrapper = NullCallWrapper(), CallKind call_kind = CALL_AS_METHOD); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - void EnterExitFramePrologue(); void EnterExitFrameEpilogue(int argc, bool save_doubles); @@ -729,6 +859,20 @@ class MacroAssembler: public Assembler { Register scratch, bool gc_allowed); + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Uses ecx as scratch and leaves addr_reg + // unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); // Compute memory operands for safepoint stack slots. 
Operand SafepointRegisterSlot(Register reg); diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index d175d9e036..8b0b9ab911 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2008-2009 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -134,7 +134,7 @@ int RegExpMacroAssemblerIA32::stack_limit_slack() { void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) { if (by != 0) { - __ add(Operand(edi), Immediate(by * char_size())); + __ add(edi, Immediate(by * char_size())); } } @@ -152,8 +152,8 @@ void RegExpMacroAssemblerIA32::Backtrack() { CheckPreemption(); // Pop Code* offset from backtrack stack, add Code* and jump to location. Pop(ebx); - __ add(Operand(ebx), Immediate(masm_->CodeObject())); - __ jmp(Operand(ebx)); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); } @@ -219,7 +219,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str, int byte_offset = cp_offset * char_size(); if (check_end_of_string) { // Check that there are at least str.length() characters left in the input. - __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length))); + __ cmp(edi, Immediate(-(byte_offset + byte_length))); BranchOrBacktrack(greater, on_failure); } @@ -288,7 +288,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) { Label fallthrough; __ cmp(edi, Operand(backtrack_stackpointer(), 0)); __ j(not_equal, &fallthrough); - __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop. + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop. BranchOrBacktrack(no_condition, on_equal); __ bind(&fallthrough); } @@ -300,7 +300,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( Label fallthrough; __ mov(edx, register_location(start_reg)); // Index of start of capture __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture - __ sub(ebx, Operand(edx)); // Length of capture. + __ sub(ebx, edx); // Length of capture. // The length of a capture should not be negative. This can only happen // if the end of the capture is unrecorded, or at a point earlier than @@ -320,9 +320,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ push(backtrack_stackpointer()); // After this, the eax, ecx, and edi registers are available. - __ add(edx, Operand(esi)); // Start of capture - __ add(edi, Operand(esi)); // Start of text to match against capture. - __ add(ebx, Operand(edi)); // End of text to match against capture. + __ add(edx, esi); // Start of capture + __ add(edi, esi); // Start of text to match against capture. + __ add(ebx, edi); // End of text to match against capture. Label loop; __ bind(&loop); @@ -339,15 +339,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ movzx_b(ecx, Operand(edx, 0)); __ or_(ecx, 0x20); - __ cmp(eax, Operand(ecx)); + __ cmp(eax, ecx); __ j(not_equal, &fail); __ bind(&loop_increment); // Increment pointers into match and capture strings. - __ add(Operand(edx), Immediate(1)); - __ add(Operand(edi), Immediate(1)); + __ add(edx, Immediate(1)); + __ add(edi, Immediate(1)); // Compare to end of match, and loop if not done. 
- __ cmp(edi, Operand(ebx)); + __ cmp(edi, ebx); __ j(below, &loop); __ jmp(&success); @@ -361,9 +361,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Restore original value before continuing. __ pop(backtrack_stackpointer()); // Drop original value of character position. - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); // Compute new value of character position after the matched part. - __ sub(edi, Operand(esi)); + __ sub(edi, esi); } else { ASSERT(mode_ == UC16); // Save registers before calling C function. @@ -389,16 +389,19 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Set byte_offset2. // Found by adding negative string-end offset of current position (edi) // to end of string. - __ add(edi, Operand(esi)); + __ add(edi, esi); __ mov(Operand(esp, 1 * kPointerSize), edi); // Set byte_offset1. // Start of capture, where edx already holds string-end negative offset. - __ add(edx, Operand(esi)); + __ add(edx, esi); __ mov(Operand(esp, 0 * kPointerSize), edx); - ExternalReference compare = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(compare, argument_count); + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(compare, argument_count); + } // Pop original values before reacting on result value. __ pop(ebx); __ pop(backtrack_stackpointer()); @@ -406,10 +409,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ pop(esi); // Check if function returned non-zero for success or zero for failure. - __ or_(eax, Operand(eax)); + __ or_(eax, eax); BranchOrBacktrack(zero, on_no_match); // On success, increment position by length of capture. - __ add(edi, Operand(ebx)); + __ add(edi, ebx); } __ bind(&fallthrough); } @@ -425,7 +428,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Find length of back-referenced capture. __ mov(edx, register_location(start_reg)); __ mov(eax, register_location(start_reg + 1)); - __ sub(eax, Operand(edx)); // Length to check. + __ sub(eax, edx); // Length to check. // Fail on partial or illegal capture (start of capture after end of capture). BranchOrBacktrack(less, on_no_match); // Succeed on empty capture (including no capture) @@ -433,7 +436,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Check that there are sufficient characters left in the input. __ mov(ebx, edi); - __ add(ebx, Operand(eax)); + __ add(ebx, eax); BranchOrBacktrack(greater, on_no_match); // Save register to make it available below. @@ -441,7 +444,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Compute pointers to match string and capture string __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match. - __ add(edx, Operand(esi)); // Start of capture. + __ add(edx, esi); // Start of capture. __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match Label loop; @@ -456,10 +459,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( } __ j(not_equal, &fail); // Increment pointers into capture and match string. - __ add(Operand(edx), Immediate(char_size())); - __ add(Operand(ebx), Immediate(char_size())); + __ add(edx, Immediate(char_size())); + __ add(ebx, Immediate(char_size())); // Check if we have reached end of match area. 
- __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(below, &loop); __ jmp(&success); @@ -471,7 +474,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( __ bind(&success); // Move current character position to position after match. __ mov(edi, ecx); - __ sub(Operand(edi), esi); + __ sub(edi, esi); // Restore backtrack stackpointer. __ pop(backtrack_stackpointer()); @@ -574,17 +577,17 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, return true; case '.': { // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) - __ mov(Operand(eax), current_character()); - __ xor_(Operand(eax), Immediate(0x01)); + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c - __ sub(Operand(eax), Immediate(0x0b)); + __ sub(eax, Immediate(0x0b)); __ cmp(eax, 0x0c - 0x0b); BranchOrBacktrack(below_equal, on_no_match); if (mode_ == UC16) { // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. - __ sub(Operand(eax), Immediate(0x2028 - 0x0b)); + __ sub(eax, Immediate(0x2028 - 0x0b)); __ cmp(eax, 0x2029 - 0x2028); BranchOrBacktrack(below_equal, on_no_match); } @@ -593,7 +596,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 'w': { if (mode_ != ASCII) { // Table is 128 entries, so all ASCII characters can be tested. - __ cmp(Operand(current_character()), Immediate('z')); + __ cmp(current_character(), Immediate('z')); BranchOrBacktrack(above, on_no_match); } ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. @@ -607,7 +610,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, Label done; if (mode_ != ASCII) { // Table is 128 entries, so all ASCII characters can be tested. - __ cmp(Operand(current_character()), Immediate('z')); + __ cmp(current_character(), Immediate('z')); __ j(above, &done); } ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. @@ -627,10 +630,10 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 'n': { // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029). // The opposite of '.'. - __ mov(Operand(eax), current_character()); - __ xor_(Operand(eax), Immediate(0x01)); + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c - __ sub(Operand(eax), Immediate(0x0b)); + __ sub(eax, Immediate(0x0b)); __ cmp(eax, 0x0c - 0x0b); if (mode_ == ASCII) { BranchOrBacktrack(above, on_no_match); @@ -641,7 +644,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. - __ sub(Operand(eax), Immediate(0x2028 - 0x0b)); + __ sub(eax, Immediate(0x2028 - 0x0b)); __ cmp(eax, 1); BranchOrBacktrack(above, on_no_match); __ bind(&done); @@ -668,7 +671,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { // Entry code: __ bind(&entry_label_); - // Start new stack frame. + + // Tell the system that we have a stack frame. Because the type is MANUAL, no + // code is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. __ push(ebp); __ mov(ebp, esp); // Save callee-save registers. 
Order here should correspond to order of @@ -699,7 +707,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ bind(&stack_limit_hit); CallCheckStackGuardState(ebx); - __ or_(eax, Operand(eax)); + __ or_(eax, eax); // If returned value is non-zero, we exit with the returned value as result. __ j(not_zero, &exit_label_); @@ -708,13 +716,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ mov(ebx, Operand(ebp, kStartIndex)); // Allocate space on stack for registers. - __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize)); + __ sub(esp, Immediate(num_registers_ * kPointerSize)); // Load string length. __ mov(esi, Operand(ebp, kInputEnd)); // Load input position. __ mov(edi, Operand(ebp, kInputStart)); // Set up edi to be negative offset from string end. - __ sub(edi, Operand(esi)); + __ sub(edi, esi); // Set eax to address of char before start of the string. // (effectively string position -1). @@ -736,7 +744,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { Label init_loop; __ bind(&init_loop); __ mov(Operand(ebp, ecx, times_1, +0), eax); - __ sub(Operand(ecx), Immediate(kPointerSize)); + __ sub(ecx, Immediate(kPointerSize)); __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize); __ j(greater, &init_loop); } @@ -777,12 +785,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { if (mode_ == UC16) { __ lea(ecx, Operand(ecx, edx, times_2, 0)); } else { - __ add(ecx, Operand(edx)); + __ add(ecx, edx); } for (int i = 0; i < num_saved_registers_; i++) { __ mov(eax, register_location(i)); // Convert to index from start of string, not end. - __ add(eax, Operand(ecx)); + __ add(eax, ecx); if (mode_ == UC16) { __ sar(eax, 1); // Convert byte index to character index. } @@ -819,7 +827,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ push(edi); CallCheckStackGuardState(ebx); - __ or_(eax, Operand(eax)); + __ or_(eax, eax); // If returning non-zero, we should end execution with the given // result as return value. __ j(not_zero, &exit_label_); @@ -854,7 +862,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. - __ or_(eax, Operand(eax)); + __ or_(eax, eax); __ j(equal, &exit_with_exception); // Otherwise use return value as new stack pointer. __ mov(backtrack_stackpointer(), eax); @@ -1183,8 +1191,8 @@ void RegExpMacroAssemblerIA32::SafeCall(Label* to) { void RegExpMacroAssemblerIA32::SafeReturn() { __ pop(ebx); - __ add(Operand(ebx), Immediate(masm_->CodeObject())); - __ jmp(Operand(ebx)); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); } @@ -1196,14 +1204,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) { void RegExpMacroAssemblerIA32::Push(Register source) { ASSERT(!source.is(backtrack_stackpointer())); // Notice: This updates flags, unlike normal Push. - __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), source); } void RegExpMacroAssemblerIA32::Push(Immediate value) { // Notice: This updates flags, unlike normal Push. 
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), value); } @@ -1212,7 +1220,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) { ASSERT(!target.is(backtrack_stackpointer())); __ mov(target, Operand(backtrack_stackpointer(), 0)); // Notice: This updates flags, unlike normal Pop. - __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); } diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index ab62764e64..07cb14d025 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -66,8 +66,8 @@ static void ProbeTable(Isolate* isolate, __ j(not_equal, &miss); // Jump to the first instruction in the code stub. - __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(extra)); + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(extra); __ bind(&miss); } else { @@ -92,8 +92,8 @@ static void ProbeTable(Isolate* isolate, __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); // Jump to the first instruction in the code stub. - __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(offset)); + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(offset); // Pop at miss. __ bind(&miss); @@ -204,8 +204,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(scratch, flags); __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize); - __ sub(scratch, Operand(name)); - __ add(Operand(scratch), Immediate(flags)); + __ sub(scratch, name); + __ add(scratch, Immediate(flags)); __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize); // Probe the secondary table. @@ -318,7 +318,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); - __ mov(eax, Operand(scratch1)); + __ mov(eax, scratch1); __ ret(0); } @@ -406,7 +406,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) { // frame. // ----------------------------------- __ pop(scratch); - __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments)); + __ add(esp, Immediate(kPointerSize * kFastApiCallArguments)); __ push(scratch); } @@ -462,7 +462,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm, __ PrepareCallApiFunction(kApiArgc + kApiStackSpace); __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_. - __ add(Operand(eax), Immediate(argc * kPointerSize)); + __ add(eax, Immediate(argc * kPointerSize)); __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_. __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_. // v8::Arguments::is_construct_call_. @@ -651,7 +651,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { scratch1, scratch2, scratch3, name, miss_label); - __ EnterInternalFrame(); + FrameScope scope(masm, StackFrame::INTERNAL); // Save the name_ register across the call. __ push(name_); @@ -668,7 +668,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - __ LeaveInternalFrame(); + + // Leave the internal frame. 
} void LoadWithInterceptor(MacroAssembler* masm, @@ -676,19 +677,21 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder, JSObject* holder_obj, Label* interceptor_succeeded) { - __ EnterInternalFrame(); - __ push(holder); // Save the holder. - __ push(name_); // Save the name. - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(holder); // Save the holder. + __ push(name_); // Save the name. + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + // Leave the internal frame. + } __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel()); __ j(not_equal, interceptor_succeeded); @@ -786,8 +789,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(eax)); - __ RecordWrite(receiver_reg, offset, name_reg, scratch); + __ mov(name_reg, eax); + __ RecordWriteField(receiver_reg, + offset, + name_reg, + scratch, + kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -797,8 +804,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(eax)); - __ RecordWrite(scratch, offset, name_reg, receiver_reg); + __ mov(name_reg, eax); + __ RecordWriteField(scratch, + offset, + name_reg, + receiver_reg, + kDontSaveFPRegs); } // Return the value (register eax). @@ -932,7 +943,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, } else if (heap()->InNewSpace(prototype)) { // Get the map of the current object. __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); - __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map()))); + __ cmp(scratch1, Immediate(Handle<Map>(current->map()))); // Branch on the result of the map check. __ j(not_equal, miss); // Check access rights to the global object. This has to happen @@ -1053,7 +1064,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ pop(scratch3); // Get return address to place it below. __ push(receiver); // receiver - __ mov(scratch2, Operand(esp)); + __ mov(scratch2, esp); ASSERT(!scratch2.is(reg)); __ push(reg); // holder // Push data from AccessorInfo. @@ -1084,7 +1095,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ PrepareCallApiFunction(kApiArgc); __ mov(ApiParameterOperand(0), ebx); // name. - __ add(Operand(ebx), Immediate(kPointerSize)); + __ add(ebx, Immediate(kPointerSize)); __ mov(ApiParameterOperand(1), ebx); // arguments pointer. // Emitting a stub call may try to allocate (if the code is not @@ -1158,40 +1169,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. 
- __ push(receiver); - } - __ push(holder_reg); - __ push(name_reg); - - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ cmp(eax, factory()->no_interceptor_result_sentinel()); - __ j(equal, &interceptor_failed); - __ LeaveInternalFrame(); - __ ret(0); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ push(receiver); + } + __ push(holder_reg); + __ push(name_reg); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ cmp(eax, factory()->no_interceptor_result_sentinel()); + __ j(equal, &interceptor_failed); + frame_scope.GenerateLeaveFrame(); + __ ret(0); - __ LeaveInternalFrame(); + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); + } + + // Leave the internal frame. + } // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into holder_reg. @@ -1259,7 +1272,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { - __ cmp(Operand(ecx), Immediate(Handle<String>(name))); + __ cmp(ecx, Immediate(Handle<String>(name))); __ j(not_equal, miss); } } @@ -1316,7 +1329,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, Immediate(Handle<SharedFunctionInfo>(function->shared()))); __ j(not_equal, miss); } else { - __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function))); + __ cmp(edi, Immediate(Handle<JSFunction>(function))); __ j(not_equal, miss); } } @@ -1441,21 +1454,25 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ j(not_equal, &call_builtin); if (argc == 1) { // Otherwise fall through to call builtin. - Label exit, with_write_barrier, attempt_to_grow_elements; + Label attempt_to_grow_elements, with_write_barrier; // Get the array's length into eax and calculate new length. __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - __ add(Operand(eax), Immediate(Smi::FromInt(argc))); + __ add(eax, Immediate(Smi::FromInt(argc))); // Get the element's length into ecx. __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); // Check if we could survive without allocation. - __ cmp(eax, Operand(ecx)); + __ cmp(eax, ecx); __ j(greater, &attempt_to_grow_elements); + // Check if value is a smi. + __ mov(ecx, Operand(esp, argc * kPointerSize)); + __ JumpIfNotSmi(ecx, &with_write_barrier); + // Save new length. 
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); @@ -1463,20 +1480,27 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ lea(edx, FieldOperand(ebx, eax, times_half_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); - __ mov(ecx, Operand(esp, argc * kPointerSize)); __ mov(Operand(edx, 0), ecx); - // Check if value is a smi. - __ JumpIfNotSmi(ecx, &with_write_barrier); - - __ bind(&exit); __ ret((argc + 1) * kPointerSize); __ bind(&with_write_barrier); - __ InNewSpace(ebx, ecx, equal, &exit); + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(edi, &call_builtin); + + // Save new length. + __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); + + // Push the element. + __ lea(edx, FieldOperand(ebx, + eax, times_half_pointer_size, + FixedArray::kHeaderSize - argc * kPointerSize)); + __ mov(Operand(edx, 0), ecx); + + __ RecordWrite( + ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - __ RecordWriteHelper(ebx, edx, ecx); __ ret((argc + 1) * kPointerSize); __ bind(&attempt_to_grow_elements); @@ -1484,6 +1508,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ jmp(&call_builtin); } + __ mov(edi, Operand(esp, argc * kPointerSize)); + // Growing elements that are SMI-only requires special handling in case + // the new element is non-Smi. For now, delegate to the builtin. + Label no_fast_elements_check; + __ JumpIfSmi(edi, &no_fast_elements_check); + __ mov(esi, FieldOperand(edx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(esi, &call_builtin, Label::kFar); + __ bind(&no_fast_elements_check); + + // We could be lucky and the elements array could be at the top of + // new-space. In this case we can just grow it in place by moving the + // allocation pointer up. + ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = @@ -1497,33 +1534,43 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ lea(edx, FieldOperand(ebx, eax, times_half_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); - __ cmp(edx, Operand(ecx)); + __ cmp(edx, ecx); __ j(not_equal, &call_builtin); - __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize)); + __ add(ecx, Immediate(kAllocationDelta * kPointerSize)); __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit)); __ j(above, &call_builtin); // We fit and could grow elements. __ mov(Operand::StaticVariable(new_space_allocation_top), ecx); - __ mov(ecx, Operand(esp, argc * kPointerSize)); // Push the argument... - __ mov(Operand(edx, 0), ecx); + __ mov(Operand(edx, 0), edi); // ... and fill the rest with holes. for (int i = 1; i < kAllocationDelta; i++) { __ mov(Operand(edx, i * kPointerSize), Immediate(factory()->the_hole_value())); } + // We know the elements array is in new space so we don't need the + // remembered set, but we just pushed a value onto it so we may have to + // tell the incremental marker to rescan the object that we just grew. We + // don't need to worry about the holes because they are in old space and + // already marked black. + __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET); + // Restore receiver to edx as finish sequence assumes it's here. __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // Increment element's and array's sizes. 
__ add(FieldOperand(ebx, FixedArray::kLengthOffset), Immediate(Smi::FromInt(kAllocationDelta))); + + // NOTE: This only happen in new-space, where we don't + // care about the black-byte-count on pages. Otherwise we should + // update that too if the object is black. + __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); - // Elements are in new space, so write barrier is not required. __ ret((argc + 1) * kPointerSize); } @@ -1585,7 +1632,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, // Get the array's length into ecx and calculate new length. __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset)); - __ sub(Operand(ecx), Immediate(Smi::FromInt(1))); + __ sub(ecx, Immediate(Smi::FromInt(1))); __ j(negative, &return_undefined); // Get the last element. @@ -1594,7 +1641,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, __ mov(eax, FieldOperand(ebx, ecx, times_half_pointer_size, FixedArray::kHeaderSize)); - __ cmp(Operand(eax), Immediate(factory()->the_hole_value())); + __ cmp(eax, Immediate(factory()->the_hole_value())); __ j(equal, &call_builtin); // Set the array's length. @@ -2058,10 +2105,10 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, __ sar(ebx, kBitsPerInt - 1); // Do bitwise not or do nothing depending on ebx. - __ xor_(eax, Operand(ebx)); + __ xor_(eax, ebx); // Add 1 or do nothing depending on ebx. - __ sub(eax, Operand(ebx)); + __ sub(eax, ebx); // If the result is still negative, go to the slow case. // This only happens for the most negative smi. @@ -2144,7 +2191,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall( // Allocate space for v8::Arguments implicit values. Must be initialized // before calling any runtime function. - __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize)); + __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize)); // Check that the maps haven't changed and find a Holder as a side effect. CheckPrototypes(JSObject::cast(object), edx, holder, @@ -2160,7 +2207,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall( if (result->IsFailure()) return result; __ bind(&miss); - __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize)); + __ add(esp, Immediate(kFastApiCallArguments * kPointerSize)); __ bind(&miss_before_stack_reserved); MaybeObject* maybe_result = GenerateMissBranch(); @@ -2599,13 +2646,9 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, Immediate(Handle<Map>(object->map()))); __ j(not_equal, &miss); - // Compute the cell operand to use. - Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell)); - if (Serializer::enabled()) { - __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); - cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); - } + __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); + Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); // Check that the value in the cell is not the hole. If it is, this // cell could have been deleted and reintroducing the global needs @@ -2616,8 +2659,23 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ mov(cell_operand, eax); + Label done; + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); + + __ mov(ecx, eax); + __ lea(edx, cell_operand); + // Cells are always in the remembered set. + __ RecordWrite(ebx, // Object. + edx, // Address. + ecx, // Value. 
+ kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); // Return the value (register eax). + __ bind(&done); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1); __ ret(0); @@ -2649,7 +2707,7 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, __ IncrementCounter(counters->keyed_store_field(), 1); // Check that the name has not changed. - __ cmp(Operand(ecx), Immediate(Handle<String>(name))); + __ cmp(ecx, Immediate(Handle<String>(name))); __ j(not_equal, &miss); // Generate store field code. Trashes the name register. @@ -2697,9 +2755,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( +MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( MapList* receiver_maps, - CodeList* handler_ics) { + CodeList* handler_stubs, + MapList* transitioned_maps) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -2707,15 +2766,21 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( // -- esp[0] : return address // ----------------------------------- Label miss; - __ JumpIfSmi(edx, &miss); - - Register map_reg = ebx; - __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset)); - int receiver_count = receiver_maps->length(); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map(receiver_maps->at(current)); - __ cmp(map_reg, map); - __ j(equal, Handle<Code>(handler_ics->at(current))); + __ JumpIfSmi(edx, &miss, Label::kNear); + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + // ebx: receiver->map(). + for (int i = 0; i < receiver_maps->length(); ++i) { + Handle<Map> map(receiver_maps->at(i)); + __ cmp(edi, map); + if (transitioned_maps->at(i) == NULL) { + __ j(equal, Handle<Code>(handler_stubs->at(i))); + } else { + Label next_map; + __ j(not_equal, &next_map, Label::kNear); + __ mov(ebx, Immediate(Handle<Map>(transitioned_maps->at(i)))); + __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET); + __ bind(&next_map); + } } __ bind(&miss); Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); @@ -2941,7 +3006,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name, __ IncrementCounter(counters->keyed_load_field(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss); @@ -2971,7 +3036,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback( __ IncrementCounter(counters->keyed_load_callback(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx, @@ -3006,7 +3071,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ IncrementCounter(counters->keyed_load_constant_function(), 1); // Check that the name has not changed. 
- __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi, @@ -3034,7 +3099,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ IncrementCounter(counters->keyed_load_interceptor(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); LookupResult lookup; @@ -3070,7 +3135,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { __ IncrementCounter(counters->keyed_load_array_length(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadArrayLength(masm(), edx, ecx, &miss); @@ -3095,7 +3160,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { __ IncrementCounter(counters->keyed_load_string_length(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true); @@ -3120,7 +3185,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { __ IncrementCounter(counters->keyed_load_function_prototype(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss); @@ -3155,7 +3220,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( +MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic( MapList* receiver_maps, CodeList* handler_ics) { // ----------- S t a t e ------------- @@ -3298,7 +3363,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // Move argc to ebx and retrieve and tag the JSObject to return. __ mov(ebx, eax); __ pop(eax); - __ or_(Operand(eax), Immediate(kHeapObjectTag)); + __ or_(eax, Immediate(kHeapObjectTag)); // Remove caller arguments and receiver from the stack and return. __ pop(ecx); @@ -3679,10 +3744,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // If the value is NaN or +/-infinity, the result is 0x80000000, // which is automatically zero when taken mod 2^n, n < 32. 
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ fisttp_d(Operand(esp, 0)); __ pop(ebx); - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); } else { ASSERT(CpuFeatures::IsSupported(SSE2)); CpuFeatures::Scope scope(SSE2); @@ -3838,15 +3903,17 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array) { +void KeyedStoreStubCompiler::GenerateStoreFastElement( + MacroAssembler* masm, + bool is_js_array, + ElementsKind elements_kind) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label miss_force_generic; + Label miss_force_generic, transition_elements_kind; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3870,11 +3937,28 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, __ j(above_equal, &miss_force_generic); } - // Do the store and update the write barrier. Make sure to preserve - // the value in register eax. - __ mov(edx, Operand(eax)); - __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax); - __ RecordWrite(edi, 0, edx, ecx); + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(eax, &transition_elements_kind); + // ecx is a smi, use times_half_pointer_size instead of + // times_pointer_size + __ mov(FieldOperand(edi, + ecx, + times_half_pointer_size, + FixedArray::kHeaderSize), eax); + } else { + ASSERT(elements_kind == FAST_ELEMENTS); + // Do the store and update the write barrier. + // ecx is a smi, use times_half_pointer_size instead of + // times_pointer_size + __ lea(ecx, FieldOperand(edi, + ecx, + times_half_pointer_size, + FixedArray::kHeaderSize)); + __ mov(Operand(ecx, 0), eax); + // Make sure to preserve the value in register eax. + __ mov(edx, eax); + __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs); + } // Done. __ ret(0); @@ -3884,6 +3968,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, Handle<Code> ic_force_generic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + + // Handle transition to other elements kinds without using the generic stub. + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ jmp(ic_miss, RelocInfo::CODE_TARGET); } @@ -3896,8 +3985,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label miss_force_generic, smi_value, is_nan, maybe_nan; - Label have_double_value, not_nan; + Label miss_force_generic, transition_elements_kind; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3918,59 +4006,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( } __ j(above_equal, &miss_force_generic); - __ JumpIfSmi(eax, &smi_value, Label::kNear); - - __ CheckMap(eax, - masm->isolate()->factory()->heap_number_map(), - &miss_force_generic, - DONT_DO_SMI_CHECK); - - // Double value, canonicalize NaN. 
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); - __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32)); - __ j(greater_equal, &maybe_nan, Label::kNear); - - __ bind(¬_nan); - ExternalReference canonical_nan_reference = - ExternalReference::address_of_canonical_non_hole_nan(); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset)); - __ bind(&have_double_value); - __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize), - xmm0); - __ ret(0); - } else { - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ bind(&have_double_value); - __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize)); - __ ret(0); - } - - __ bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - __ j(greater, &is_nan, Label::kNear); - __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0)); - __ j(zero, ¬_nan); - __ bind(&is_nan); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference)); - } else { - __ fld_d(Operand::StaticVariable(canonical_nan_reference)); - } - __ jmp(&have_double_value, Label::kNear); - - __ bind(&smi_value); - // Value is a smi. convert to a double and store. - // Preserve original value. - __ mov(edx, eax); - __ SmiUntag(edx); - __ push(edx); - __ fild_s(Operand(esp, 0)); - __ pop(edx); - __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize)); + __ StoreNumberToDoubleElements(eax, + edi, + ecx, + edx, + xmm0, + &transition_elements_kind, + true); __ ret(0); // Handle store cache miss, replacing the ic with the generic stub. @@ -3978,6 +4020,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Handle<Code> ic_force_generic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + + // Handle transition to other elements kinds without using the generic stub. + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ jmp(ic_miss, RelocInfo::CODE_TARGET); } |
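
A note on the final hunk: the removed block (heap-number map check, NaN canonicalization against the canonical non-hole NaN, and the smi-to-double conversion) is folded into the StoreNumberToDoubleElements macro-assembler helper. The annotated call below is a reading aid only; the argument roles are inferred from the code being replaced, and the meaning of the trailing flag is an assumption rather than documented behavior.

  __ StoreNumberToDoubleElements(
      eax,     // value: a smi or a heap number
      edi,     // elements: the FixedDoubleArray backing store
      ecx,     // key: element index, as a smi
      edx,     // scratch register (the removed smi path used edx the same way)
      xmm0,    // scratch double register for the SSE2 path
      &transition_elements_kind,  // taken when the value is neither a smi nor a
                                  // heap number, so the elements kind must be
                                  // transitioned before the store can succeed
      true);   // assumed: permit the CPU-specialized (SSE2) store path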
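
Most of the remaining one-line edits in this file follow one mechanical pattern: arithmetic and compare instructions that act on a plain register no longer wrap it in Operand(...). Both spellings denote the same register operand; the Register overloads simply shorten the call sites. Examples taken from the hunks above:

  __ cmp(Operand(eax), Immediate(Handle<String>(name)));   // old spelling
  __ cmp(eax, Immediate(Handle<String>(name)));            // new spelling
  __ sub(Operand(esp), Immediate(2 * kPointerSize));       // old spelling
  __ sub(esp, Immediate(2 * kPointerSize));                // new spelling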
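
The RecordWrite calls added in this patch (global store, fast-element store) share one shape: store first, compute the slot address with lea, then call the barrier with explicit SaveFPRegsMode / RememberedSetAction / SmiCheck arguments. The sketch below is illustrative and not part of the commit: the helper name, register roles, and the EMIT_REMEMBERED_SET default are assumptions; the RecordWrite signature and flag names are taken from the calls visible above, and the file's usual #define __ ACCESS_MASM(masm) convention is assumed.

  // Illustrative helper, not part of this commit: stores |value| into a tagged
  // field of |object| and emits the incremental-marking write barrier the same
  // way the stubs above do.
  static void StoreFieldWithBarrier(MacroAssembler* masm,
                                    Register object,    // heap object being written
                                    int offset,         // tagged field offset
                                    Register value,     // value to store
                                    Register scratch) { // receives the slot address
    // Do the store itself.
    __ mov(FieldOperand(object, offset), value);

    // Smis never need to be recorded, so skip the barrier for them (this is
    // why the call sites above can pass OMIT_SMI_CHECK).
    Label done;
    __ JumpIfSmi(value, &done, Label::kNear);

    // RecordWrite clobbers the address and value registers, which is why the
    // stubs above copy eax aside when the stored value must survive the call.
    __ lea(scratch, FieldOperand(object, offset));
    __ RecordWrite(object,              // object owning the slot
                   scratch,             // address of the slot just written
                   value,               // value that was stored
                   kDontSaveFPRegs,     // no live XMM values in these stubs
                   EMIT_REMEMBERED_SET, // OMIT_REMEMBERED_SET only when the slot
                                        // is already covered, e.g. global cells
                   OMIT_SMI_CHECK);     // the smi check was done explicitly above
    __ bind(&done);
  }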