author    Ali Ijaz Sheikh <ofrobots@google.com>                 2016-03-01 08:58:05 -0800
committer Ali Sheikh <ofrobots@lemonhope.roam.corp.google.com>  2016-03-03 20:35:20 -0800
commit    069e02ab47656b3efd1b6829c65856b2e1c2d1db (patch)
tree      eb643e0a2e88fd64bb9fc927423458d2ae96c2db /deps/v8/src/mips
parent    8938355398c79f583a468284b768652d12ba9bc9 (diff)
deps: upgrade to V8 4.9.385.18
Pick up the current branch head for V8 4.9
https://github.com/v8/v8/commit/1ecba0f
PR-URL: https://github.com/nodejs/node/pull/4722
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
Diffstat (limited to 'deps/v8/src/mips')
 deps/v8/src/mips/OWNERS                        |    1
 deps/v8/src/mips/assembler-mips-inl.h          |   46
 deps/v8/src/mips/assembler-mips.cc             |  130
 deps/v8/src/mips/assembler-mips.h              |   43
 deps/v8/src/mips/builtins-mips.cc              | 1815
 deps/v8/src/mips/code-stubs-mips.cc            |  396
 deps/v8/src/mips/code-stubs-mips.h             |    9
 deps/v8/src/mips/codegen-mips.cc               |   57
 deps/v8/src/mips/codegen-mips.h                |    2
 deps/v8/src/mips/constants-mips.h              |   48
 deps/v8/src/mips/cpu-mips.cc                   |   11
 deps/v8/src/mips/deoptimizer-mips.cc           |    7
 deps/v8/src/mips/disasm-mips.cc                |   93
 deps/v8/src/mips/interface-descriptors-mips.cc |   59
 deps/v8/src/mips/macro-assembler-mips.cc       |  843
 deps/v8/src/mips/macro-assembler-mips.h        |  209
 deps/v8/src/mips/simulator-mips.cc             |  245
 deps/v8/src/mips/simulator-mips.h              |   45
18 files changed, 2407 insertions, 1652 deletions
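
The change that recurs throughout the diff below is mechanical: Assembler::set_target_address_at (and the related deserialization helpers) gain a leading Isolate* parameter, and instruction-cache flushing moves from CpuFeatures::FlushICache to Assembler::FlushICache. A minimal before/after sketch of the core signature, distilled from the assembler-mips.h hunks (the parameter names and the FLUSH_ICACHE_IF_NEEDED default appear verbatim in the diff; the surrounding class body is elided here):

    // Before: patching a code target carried no isolate context.
    static void set_target_address_at(
        Address pc, Address target,
        ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

    // After: callers thread the Isolate through, so the icache flush that
    // follows the patch can be performed on behalf of a specific isolate.
    static void set_target_address_at(
        Isolate* isolate, Address pc, Address target,
        ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);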
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
 akos.palfi@imgtec.com
 balazs.kilvady@imgtec.com
 dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 0719055eff..27ec8e5bda 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -92,7 +92,7 @@ void RelocInfo::apply(intptr_t delta) {
     // Absolute code pointer inside code object moves with the code object.
     byte* p = reinterpret_cast<byte*>(pc_);
     int count = Assembler::RelocateInternalReference(rmode_, p, delta);
-    CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+    Assembler::FlushICache(isolate_, p, count * sizeof(uint32_t));
   }
 }
@@ -142,7 +142,8 @@ void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
-  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+  Assembler::set_target_address_at(isolate_, pc_, host_, target,
+                                   icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL && IsCodeTarget(rmode_)) {
     Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -179,7 +180,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
 void Assembler::deserialization_set_target_internal_reference_at(
-    Address pc, Address target, RelocInfo::Mode mode) {
+    Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
   if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
     DCHECK(IsLui(instr_at(pc)));
     set_target_internal_reference_encoded_at(pc, target);
@@ -207,7 +208,7 @@ void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(pc_, host_,
+  Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -310,8 +311,7 @@ Code* RelocInfo::code_age_stub() {
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
-                                   host_,
+  Assembler::set_target_address_at(isolate_, pc_ + Assembler::kInstrSize, host_,
                                    stub->instruction_start());
 }
@@ -328,7 +328,7 @@ void RelocInfo::set_debug_call_address(Address target) {
   DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
   // The pc_ offset of 0 assumes patched debug break slot or return
   // sequence.
-  Assembler::set_target_address_at(pc_, host_, target);
+  Assembler::set_target_address_at(isolate_, pc_, host_, target);
   if (host() != NULL) {
     Object* target_code = Code::GetCodeFromTargetAddress(target);
     host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -346,7 +346,7 @@ void RelocInfo::WipeOut() {
   } else if (IsInternalReferenceEncoded(rmode_)) {
     Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
   } else {
-    Assembler::set_target_address_at(pc_, host_, NULL);
+    Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
   }
 }
@@ -437,11 +437,23 @@ void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
 }
 
 
-void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
+void Assembler::CheckForEmitInForbiddenSlot() {
   if (!is_buffer_growth_blocked()) {
     CheckBuffer();
   }
   if (IsPrevInstrCompactBranch()) {
+    // Nop instruction to preceed a CTI in forbidden slot:
+    Instr nop = SPECIAL | SLL;
+    *reinterpret_cast<Instr*>(pc_) = nop;
+    pc_ += kInstrSize;
+
+    ClearCompactBranchState();
+  }
+}
+
+
+void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
+  if (IsPrevInstrCompactBranch()) {
     if (Instruction::IsForbiddenAfterBranchInstr(x)) {
       // Nop instruction to preceed a CTI in forbidden slot:
       Instr nop = SPECIAL | SLL;
@@ -459,6 +471,22 @@ void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
 }
 
 
+template <typename T>
+void Assembler::EmitHelper(T x) {
+  *reinterpret_cast<T*>(pc_) = x;
+  pc_ += sizeof(x);
+  CheckTrampolinePoolQuick();
+}
+
+
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
+  if (!is_buffer_growth_blocked()) {
+    CheckBuffer();
+  }
+  EmitHelper(x, is_compact_branch);
+}
+
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 3860fe4e19..a8b6cc7c32 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -296,6 +296,7 @@ void Assembler::GetCode(CodeDesc* desc) {
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
+  desc->constant_pool_size = 0;
 }
@@ -1335,17 +1336,23 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
 
 void Assembler::bovc(Register rs, Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(rs.code() >= rt.code());
-  GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  DCHECK(!rs.is(zero_reg));
+  if (rs.code() >= rt.code()) {
+    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  } else {
+    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+  }
 }
 
 
 void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
   DCHECK(IsMipsArchVariant(kMips32r6));
-  DCHECK(!(rs.is(zero_reg)));
-  DCHECK(rs.code() >= rt.code());
-  GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  DCHECK(!rs.is(zero_reg));
+  if (rs.code() >= rt.code()) {
+    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+  } else {
+    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+  }
 }
@@ -1652,7 +1659,7 @@ void Assembler::sll(Register rd,
   // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
   // instructions.
   DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
-  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
 }
@@ -1662,7 +1669,7 @@ void Assembler::sllv(Register rd, Register rt, Register rs) {
 
 
 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
-  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
 }
@@ -1672,7 +1679,7 @@ void Assembler::srlv(Register rd, Register rt, Register rs) {
 
 
 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
-  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
 }
@@ -1693,7 +1700,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
 
 void Assembler::rotrv(Register rd, Register rt, Register rs) {
   // Should be called via MacroAssembler::Ror.
-  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
   DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
       (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
@@ -1701,6 +1708,16 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
 }
 
 
+void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
+  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  DCHECK(sa < 5 && sa > 0);
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+                (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+  emit(instr);
+}
+
+
 // ------------Memory-instructions-------------
 
 // Helper for base-reg + offset, when offset is larger than int16.
@@ -1818,7 +1835,7 @@ void Assembler::lui(Register rd, int32_t j) {
 }
 
 
-void Assembler::aui(Register rs, Register rt, int32_t j) {
+void Assembler::aui(Register rt, Register rs, int32_t j) {
   // This instruction uses same opcode as 'lui'. The difference in encoding is
   // 'lui' has zero reg. for rs field.
   DCHECK(!(rs.is(zero_reg)));
@@ -2194,13 +2211,13 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
 
 
 void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
 }
 
 
 void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
 }
@@ -2267,19 +2284,19 @@ void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
 
 
 void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
 }
 
 
 void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
 }
 
 
 void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   FPURegister ft;
   ft.reg_code = (cc & 0x0007) << 2 | 1;
   GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
@@ -2287,7 +2304,7 @@ void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
 
 
 void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   FPURegister ft;
   ft.reg_code = (cc & 0x0007) << 2 | 1;
   GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
@@ -2295,7 +2312,7 @@ void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
 
 
 void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   FPURegister ft;
   ft.reg_code = (cc & 0x0007) << 2 | 0;
   GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
@@ -2303,7 +2320,7 @@ void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
 
 
 void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
-  DCHECK(IsMipsArchVariant(kMips32r2));
+  DCHECK(!IsMipsArchVariant(kMips32r6));
   FPURegister ft;
   ft.reg_code = (cc & 0x0007) << 2 | 0;
   GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
@@ -2489,55 +2506,71 @@ void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
 
 
 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
 }
 
 
 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
 }
 
 
 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
 }
 
 
 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
 }
 
 
 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
 }
 
 
 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
 }
 
 
 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
 }
 
 
 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
 }
 
 
 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
 }
 
 
 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
 }
@@ -2632,7 +2665,8 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
 
 
 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
 }
@@ -2648,7 +2682,8 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
 
 
 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
-  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+  DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+         IsFp64Mode());
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
 }
@@ -2796,6 +2831,7 @@ void Assembler::GrowBuffer() {
 
   // Set up new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.origin = this;
 
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -2829,54 +2865,42 @@ void Assembler::GrowBuffer() {
 
 
 void Assembler::db(uint8_t data) {
-  CheckBuffer();
-  *reinterpret_cast<uint8_t*>(pc_) = data;
-  pc_ += sizeof(uint8_t);
+  CheckForEmitInForbiddenSlot();
+  EmitHelper(data);
 }
 
 
 void Assembler::dd(uint32_t data) {
-  CheckBuffer();
-  *reinterpret_cast<uint32_t*>(pc_) = data;
-  pc_ += sizeof(uint32_t);
+  CheckForEmitInForbiddenSlot();
+  EmitHelper(data);
 }
 
 
 void Assembler::dq(uint64_t data) {
-  CheckBuffer();
-  *reinterpret_cast<uint64_t*>(pc_) = data;
-  pc_ += sizeof(uint64_t);
+  CheckForEmitInForbiddenSlot();
+  EmitHelper(data);
 }
 
 
 void Assembler::dd(Label* label) {
-  CheckBuffer();
-  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
   uint32_t data;
+  CheckForEmitInForbiddenSlot();
   if (label->is_bound()) {
     data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
   } else {
     data = jump_address(label);
     internal_reference_positions_.insert(label->pos());
   }
-  *reinterpret_cast<uint32_t*>(pc_) = data;
-  pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit_code_stub_address(Code* stub) {
-  CheckBuffer();
-  *reinterpret_cast<uint32_t*>(pc_) =
-      reinterpret_cast<uint32_t>(stub->instruction_start());
-  pc_ += sizeof(uint32_t);
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+  EmitHelper(data);
 }
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   // We do not try to reuse pool constants.
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::COMMENT &&
-      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
+      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
     // Adjust code for new modes.
     DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsComment(rmode)
@@ -2891,10 +2915,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
     }
     DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_,
-                                       rmode,
-                                       RecordedAstId().ToInt(),
-                                       NULL);
+      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
+                                       RecordedAstId().ToInt(), NULL);
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
@@ -3006,7 +3028,7 @@ void Assembler::QuietNaN(HeapObject* object) {
 // There is an optimization below, which emits a nop when the address
 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
 // and possibly removed.
-void Assembler::set_target_address_at(Address pc,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                       Address target,
                                       ICacheFlushMode icache_flush_mode) {
   Instr instr2 = instr_at(pc + kInstrSize);
@@ -3028,7 +3050,7 @@ void Assembler::set_target_address_at(Address pc,
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CpuFeatures::FlushICache(pc, 2 * sizeof(int32_t));
+    Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
   }
 }
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 682c6602da..054695483f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -457,30 +457,28 @@ class Assembler : public AssemblerBase {
   // Read/Modify the code target address in the branch/call instruction at pc.
   static Address target_address_at(Address pc);
-  static void set_target_address_at(Address pc,
-                                    Address target,
-                                    ICacheFlushMode icache_flush_mode =
-                                        FLUSH_ICACHE_IF_NEEDED);
+  static void set_target_address_at(
+      Isolate* isolate, Address pc, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   // On MIPS there is no Constant Pool so we skip that parameter.
   INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
     return target_address_at(pc);
   }
   INLINE(static void set_target_address_at(
-      Address pc, Address constant_pool, Address target,
+      Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    set_target_address_at(pc, target, icache_flush_mode);
+    set_target_address_at(isolate, pc, target, icache_flush_mode);
   }
   INLINE(static Address target_address_at(Address pc, Code* code)) {
     Address constant_pool = code ? code->constant_pool() : NULL;
     return target_address_at(pc, constant_pool);
   }
-  INLINE(static void set_target_address_at(Address pc,
-                                           Code* code,
-                                           Address target,
-                                           ICacheFlushMode icache_flush_mode =
-                                               FLUSH_ICACHE_IF_NEEDED)) {
+  INLINE(static void set_target_address_at(
+      Isolate* isolate, Address pc, Code* code, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
     Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+    set_target_address_at(isolate, pc, constant_pool, target,
+                          icache_flush_mode);
   }
 
   // Return the code target address at a call site from the return address
@@ -493,16 +491,17 @@ class Assembler : public AssemblerBase {
   // This is for calls and branches within generated code.  The serializer
   // has already deserialized the lui/ori instructions etc.
   inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Code* code, Address target) {
+      Isolate* isolate, Address instruction_payload, Code* code,
+      Address target) {
     set_target_address_at(
-        instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
-        code,
+        isolate,
+        instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
        target);
   }
 
   // This sets the internal reference at the pc.
   inline static void deserialization_set_target_internal_reference_at(
-      Address pc, Address target,
+      Isolate* isolate, Address pc, Address target,
       RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
 
   // Size of an instruction.
@@ -752,6 +751,8 @@ class Assembler : public AssemblerBase {
   void rotr(Register rd, Register rt, uint16_t sa);
   void rotrv(Register rd, Register rt, Register rs);
 
+  // Address computing instructions with shift.
+  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
 
   // ------------Memory-instructions-------------
 
@@ -1012,7 +1013,7 @@ class Assembler : public AssemblerBase {
   void RecordGeneratorContinuation();
 
   // Mark address of a debug break slot.
-  void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+  void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
   // Record the AST id of the CallIC being compiled, so that it can be placed
   // in the relocation information.
@@ -1048,9 +1049,6 @@ class Assembler : public AssemblerBase {
   void dp(uintptr_t data) { dd(data); }
   void dd(Label* label);
 
-  // Emits the address of the code stub's first instruction.
-  void emit_code_stub_address(Code* stub);
-
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   // Postpone the generation of the trampoline pool for the specified number of
@@ -1260,6 +1258,11 @@ class Assembler : public AssemblerBase {
   void GrowBuffer();
   inline void emit(Instr x,
                    CompactBranchType is_compact_branch = CompactBranchType::NO);
+  inline void emit(uint64_t x);
+  inline void CheckForEmitInForbiddenSlot();
+  template <typename T>
+  inline void EmitHelper(T x);
+  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
 
   // Instruction generation.
   // We have 3 different kind of encoding layout on MIPS.
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 08f8e65359..f6c1dfbaaf 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -23,9 +23,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                 BuiltinExtraArguments extra_args) {
   // ----------- S t a t e -------------
   //  -- a0                 : number of arguments excluding receiver
-  //                          (only guaranteed when the called function
-  //                           is not marked as DontAdaptArguments)
-  //  -- a1                 : called function
+  //  -- a1                 : target
+  //  -- a3                 : new.target
   //  -- sp[0]              : last argument
   //  -- ...
   //  -- sp[4 * (argc - 1)] : first argument
@@ -37,34 +36,30 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
   // ConstructStubs implemented in C++ will be run in the context of the caller
   // instead of the callee, due to the way that [[Construct]] is defined for
   // ordinary functions).
-  // TODO(bmeurer): Can we make this more robust?
   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   // Insert extra arguments.
   int num_extra_args = 0;
-  if (extra_args == NEEDS_CALLED_FUNCTION) {
-    num_extra_args = 1;
-    __ push(a1);
-  } else {
-    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+  switch (extra_args) {
+    case BuiltinExtraArguments::kTarget:
+      __ Push(a1);
+      ++num_extra_args;
+      break;
+    case BuiltinExtraArguments::kNewTarget:
+      __ Push(a3);
+      ++num_extra_args;
+      break;
+    case BuiltinExtraArguments::kTargetAndNewTarget:
+      __ Push(a1, a3);
+      num_extra_args += 2;
+      break;
+    case BuiltinExtraArguments::kNone:
+      break;
   }
 
   // JumpToExternalReference expects a0 to contain the number of arguments
-  // including the receiver and the extra arguments. But a0 is only valid
-  // if the called function is marked as DontAdaptArguments, otherwise we
-  // need to load the argument count from the SharedFunctionInfo.
-  Label argc, done_argc;
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2,
-        FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ SmiUntag(a2);
-  __ Branch(&argc, eq, a2,
-            Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
-  __ Addu(a0, a2, num_extra_args + 1);
-  __ jmp(&done_argc);
-  __ bind(&argc);
+  // including the receiver and the extra arguments.
   __ Addu(a0, a0, num_extra_args + 1);
-  __ bind(&done_argc);
 
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
@@ -73,30 +68,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
 // Load the built-in InternalArray function from the current context.
 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                               Register result) {
-  // Load the native context.
-
-  __ lw(result,
-        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
   // Load the InternalArray function from the native context.
-  __ lw(result,
-        MemOperand(result,
-                   Context::SlotOffset(
-                       Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
 }
 
 
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the native context.
-
-  __ lw(result,
-        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
   // Load the Array function from the native context.
-  __ lw(result,
-        MemOperand(result,
-                   Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
 }
@@ -162,6 +142,108 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
 
 
 // static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Load the first argument into a0 and get rid of the rest (including the
+  // receiver).
+  Label no_arguments;
+  {
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Subu(a0, a0, Operand(1));
+    __ sll(a0, a0, kPointerSizeLog2);
+    __ Addu(sp, a0, sp);
+    __ lw(a0, MemOperand(sp));
+    __ Drop(2);
+  }
+
+  // 2a. Convert first argument to number.
+  ToNumberStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+
+  // 2b. No arguments, return +0.
+  __ bind(&no_arguments);
+  __ Move(v0, Smi::FromInt(0));
+  __ DropAndRet(1);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- a3                     : new target
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // 1. Make sure we operate in the context of the called function.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into a0 and get rid of the rest (including the
+  // receiver).
+  {
+    Label no_arguments, done;
+    __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+    __ Subu(a0, a0, Operand(1));
+    __ sll(a0, a0, kPointerSizeLog2);
+    __ Addu(sp, a0, sp);
+    __ lw(a0, MemOperand(sp));
+    __ Drop(2);
+    __ jmp(&done);
+    __ bind(&no_arguments);
+    __ Move(a0, Smi::FromInt(0));
+    __ Drop(1);
+    __ bind(&done);
+  }
+
+  // 3. Make sure a0 is a number.
+  {
+    Label done_convert;
+    __ JumpIfSmi(a0, &done_convert);
+    __ GetObjectType(a0, a2, a2);
+    __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a1, a3);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ Move(a0, v0);
+      __ Pop(a1, a3);
+    }
+    __ bind(&done_convert);
+  }
+
+  // 4. Check if new target and constructor differ.
+  Label new_object;
+  __ Branch(&new_object, ne, a1, Operand(a3));
+
+  // 5. Allocate a JSValue wrapper for the number.
+  __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+  __ Ret();
+
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a0, a1, a3);  // first argument, constructor, new target
+    __ CallRuntime(Runtime::kNewObject);
+    __ Pop(a0);
+  }
+  __ Ret(USE_DELAY_SLOT);
+  __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));  // In delay slot
+}
+
+
+// static
 void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0                     : number of arguments
@@ -215,7 +297,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
   __ bind(&symbol_descriptive_string);
   {
     __ Push(a0);
-    __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+    __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
   }
 }
@@ -225,13 +307,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0                     : number of arguments
   //  -- a1                     : constructor function
-  //  -- a3                     : original constructor
+  //  -- a3                     : new target
   //  -- ra                     : return address
   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  // 1. Load the first argument into a0 and get rid of the rest (including the
+  // 1. Make sure we operate in the context of the called function.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // 2. Load the first argument into a0 and get rid of the rest (including the
   // receiver).
   {
     Label no_arguments, done;
@@ -248,7 +333,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
     __ bind(&done);
   }
 
-  // 2. Make sure a0 is a string.
+  // 3. Make sure a0 is a string.
   {
     Label convert, done_convert;
     __ JumpIfSmi(a0, &convert);
@@ -267,68 +352,42 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
     __ bind(&done_convert);
   }
 
-  // 3. Allocate a JSValue wrapper for the string.
-  {
-    // ----------- S t a t e -------------
-    //  -- a0 : the first argument
-    //  -- a1 : constructor function
-    //  -- a3 : original constructor
-    //  -- ra : return address
-    // -----------------------------------
-
-    Label allocate, done_allocate, rt_call;
-
-    // Fall back to runtime if the original constructor and function differ.
-    __ Branch(&rt_call, ne, a1, Operand(a3));
-
-    __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
-    __ bind(&done_allocate);
-
-    // Initialize the JSValue in eax.
-    __ LoadGlobalFunctionInitialMap(a1, a2, a3);
-    __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-    __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
-    __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-    __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
-    __ Ret(USE_DELAY_SLOT);
-    __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
-    STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+  // 4. Check if new target and constructor differ.
+  Label new_object;
+  __ Branch(&new_object, ne, a1, Operand(a3));
 
-    // Fallback to the runtime to allocate in new space.
-    __ bind(&allocate);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Move(a2, Smi::FromInt(JSValue::kSize));
-      __ Push(a0, a1, a2);
-      __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-      __ Pop(a0, a1);
-    }
-    __ jmp(&done_allocate);
+  // 5. Allocate a JSValue wrapper for the string.
+  __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+  __ Ret();
 
-    // Fallback to the runtime to create new object.
-    __ bind(&rt_call);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(a0, a1, a1, a3);  // constructor function, original constructor
-      __ CallRuntime(Runtime::kNewObject, 2);
-      __ Pop(a0, a1);
-    }
-    __ Ret(USE_DELAY_SLOT);
-    __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+  // 6. Fallback to the runtime to create new object.
+  __ bind(&new_object);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a0, a1, a3);  // first argument, constructor, new target
+    __ CallRuntime(Runtime::kNewObject);
+    __ Pop(a0);
   }
+  __ Ret(USE_DELAY_SLOT);
+  __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));  // In delay slot
 }
 
 
 static void CallRuntimePassFunction(
     MacroAssembler* masm, Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- a1 : target function (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  // -----------------------------------
+
   FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the function onto the stack.
-  // Push call kind information and function as parameter to the runtime call.
-  __ Push(a1, a1);
+  // Push a copy of the target function and the new target.
+  // Push function as parameter to the runtime call.
+  __ Push(a1, a3, a1);
 
   __ CallRuntime(function_id, 1);
-  // Restore call kind information and receiver.
-  __ Pop(a1);
+  // Restore target function and new target.
+  __ Pop(a1, a3);
 }
@@ -365,12 +424,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
 
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function) {
+                                           bool is_api_function,
+                                           bool create_implicit_receiver) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
   //  -- a2     : allocation site or undefined
-  //  -- a3     : original constructor
+  //  -- a3     : new target
   //  -- ra     : return address
   //  -- sp[...]: constructor arguments
   // -----------------------------------
@@ -384,170 +444,162 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(a2, t0);
     __ SmiTag(a0);
-    __ Push(a2, a0, a1, a3);
-
-    // Try to allocate the object without transitioning into C code. If any of
-    // the preconditions is not met, the code bails out to the runtime call.
-    Label rt_call, allocated;
-    if (FLAG_inline_new) {
-      ExternalReference debug_step_in_fp =
-          ExternalReference::debug_step_in_fp_address(isolate);
-      __ li(a2, Operand(debug_step_in_fp));
-      __ lw(a2, MemOperand(a2));
-      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
-
-      // Verify that the original constructor is a JSFunction.
-      __ GetObjectType(a3, t1, t0);
-      __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
-
-      // Load the initial map and verify that it is in fact a map.
-      // a3: original constructor
-      __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
-      __ JumpIfSmi(a2, &rt_call);
-      __ GetObjectType(a2, t5, t4);
-      __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
-      // Fall back to runtime if the expected base constructor and base
-      // constructor differ.
-      __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
-      __ Branch(&rt_call, ne, a1, Operand(t1));
-
-      // Check that the constructor is not constructing a JSFunction (see
-      // comments in Runtime_NewObject in runtime.cc). In which case the
-      // initial map's instance type would be JS_FUNCTION_TYPE.
-      // a1: constructor function
-      // a2: initial map
-      __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-      __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
-
-      if (!is_api_function) {
-        Label allocate;
-        MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
-        // Check if slack tracking is enabled.
-        __ lw(t0, bit_field3);
-        __ DecodeField<Map::Counter>(t2, t0);
-        __ Branch(&allocate, lt, t2, Operand(Map::kSlackTrackingCounterEnd));
-        // Decrease generous allocation count.
-        __ Subu(t0, t0, Operand(1 << Map::Counter::kShift));
-        __ Branch(USE_DELAY_SLOT, &allocate, ne, t2,
-                  Operand(Map::kSlackTrackingCounterEnd));
-        __ sw(t0, bit_field3);  // In delay slot.
-
-        __ Push(a1, a2, a2);  // a2 = Initial map.
-        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-        __ Pop(a1, a2);
-        __ li(t2, Operand(Map::kSlackTrackingCounterEnd - 1));
-
-        __ bind(&allocate);
-      }
-
-      // Now allocate the JSObject on the heap.
-      // a1: constructor function
-      // a2: initial map
-      Label rt_call_reload_new_target;
-      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
-      __ Allocate(a3, t4, t5, t6, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
-      // Allocated the JSObject, now initialize the fields. Map is set to
-      // initial map and properties and elements are set to empty fixed array.
-      // a1: constructor function
-      // a2: initial map
-      // a3: object size
-      // t4: JSObject (not tagged)
-      __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-      __ mov(t5, t4);
-      __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-      __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-      __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-      __ Addu(t5, t5, Operand(3*kPointerSize));
-      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-      DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-      // Fill all the in-object properties with appropriate filler.
-      // a1: constructor function
-      // a2: initial map
-      // a3: object size (in words)
-      // t4: JSObject (not tagged)
-      // t5: First in-object property of JSObject (not tagged)
-      // t2: slack tracking counter (non-API function case)
-      DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-
-      // Use t7 to hold undefined, which is used in several places below.
-      __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
-      if (!is_api_function) {
-        Label no_inobject_slack_tracking;
-
-        // Check if slack tracking is enabled.
-        __ Branch(&no_inobject_slack_tracking, lt, t2,
-                  Operand(Map::kSlackTrackingCounterEnd));
-
-        // Allocate object with a slack.
-        __ lbu(
-            a0,
-            FieldMemOperand(
-                a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
-        __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-        __ subu(a0, a0, a2);
-        __ sll(at, a0, kPointerSizeLog2);
-        __ addu(a0, t5, at);
-        // a0: offset of first field after pre-allocated fields
-        if (FLAG_debug_code) {
-          __ sll(at, a3, kPointerSizeLog2);
-          __ Addu(t6, t4, Operand(at));  // End of object.
-          __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
-              a0, Operand(t6));
+    __ Push(a2, a0);
+
+    if (create_implicit_receiver) {
+      // Try to allocate the object without transitioning into C code. If any of
+      // the preconditions is not met, the code bails out to the runtime call.
+      Label rt_call, allocated;
+      if (FLAG_inline_new) {
+        // Verify that the new target is a JSFunction.
+        __ GetObjectType(a3, t1, t0);
+        __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
+
+        // Load the initial map and verify that it is in fact a map.
+        // a3: new target
+        __ lw(a2,
+              FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+        __ JumpIfSmi(a2, &rt_call);
+        __ GetObjectType(a2, t5, t4);
+        __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+        // Fall back to runtime if the expected base constructor and base
+        // constructor differ.
+        __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+        __ Branch(&rt_call, ne, a1, Operand(t1));
+
+        // Check that the constructor is not constructing a JSFunction (see
+        // comments in Runtime_NewObject in runtime.cc). In which case the
+        // initial map's instance type would be JS_FUNCTION_TYPE.
+        // a1: constructor function
+        // a2: initial map
+        __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+        __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
+
+        // Now allocate the JSObject on the heap.
+        // a1: constructor function
+        // a2: initial map
+        // a3: new target
+        __ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+
+        __ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
+
+        // Allocated the JSObject, now initialize the fields. Map is set to
+        // initial map and properties and elements are set to empty fixed array.
+        // a1: constructor function
+        // a2: initial map
+        // a3: new target
+        // t4: JSObject (not HeapObject tagged - the actual address).
+        // t3: start of next object
+        __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+        __ mov(t5, t4);
+        STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+        __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+        STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+        __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+        STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+        __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+        STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+        __ Addu(t5, t5, Operand(3 * kPointerSize));
+
+        // Add the object tag to make the JSObject real, so that we can continue
+        // and jump into the continuation code at any time from now on.
+        __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+        // Fill all the in-object properties with appropriate filler.
+        // t4: JSObject (tagged)
+        // t5: First in-object property of JSObject (not tagged)
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+        if (!is_api_function) {
+          Label no_inobject_slack_tracking;
+
+          MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+          // Check if slack tracking is enabled.
+          __ lw(t0, bit_field3);
+          __ DecodeField<Map::ConstructionCounter>(t2, t0);
+          // t2: slack tracking counter
+          __ Branch(&no_inobject_slack_tracking, lt, t2,
+                    Operand(Map::kSlackTrackingCounterEnd));
+          // Decrease generous allocation count.
+          __ Subu(t0, t0, Operand(1 << Map::ConstructionCounter::kShift));
+          __ sw(t0, bit_field3);
+
+          // Allocate object with a slack.
+          __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+          __ sll(a0, a0, kPointerSizeLog2);
+          __ subu(a0, t3, a0);
+          // a0: offset of first field after pre-allocated fields
+          if (FLAG_debug_code) {
+            __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t5,
+                      Operand(a0));
+          }
+          __ InitializeFieldsWithFiller(t5, a0, t7);
+
+          // To allow truncation fill the remaining fields with one pointer
+          // filler map.
+          __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+          __ InitializeFieldsWithFiller(t5, t3, t7);
+
+          // t2: slack tracking counter value before decreasing.
+          __ Branch(&allocated, ne, t2, Operand(Map::kSlackTrackingCounterEnd));
+
+          // Push the constructor, new_target and the object to the stack,
+          // and then the initial map as an argument to the runtime call.
+          __ Push(a1, a3, t4, a2);
+          __ CallRuntime(Runtime::kFinalizeInstanceSize);
+          __ Pop(a1, a3, t4);
+
+          // Continue with JSObject being successfully allocated.
+          // a1: constructor function
+          // a3: new target
+          // t4: JSObject
+          __ jmp(&allocated);
+
+          __ bind(&no_inobject_slack_tracking);
         }
-        __ InitializeFieldsWithFiller(t5, a0, t7);
-        // To allow for truncation.
-        __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
-        // Fill the remaining fields with one pointer filler map.
 
-        __ bind(&no_inobject_slack_tracking);
-      }
+        __ InitializeFieldsWithFiller(t5, t3, t7);
 
-      __ sll(at, a3, kPointerSizeLog2);
-      __ Addu(a0, t4, Operand(at));  // End of object.
-      __ InitializeFieldsWithFiller(t5, a0, t7);
-
-      // Add the object tag to make the JSObject real, so that we can continue
-      // and jump into the continuation code at any time from now on.
-      __ Addu(t4, t4, Operand(kHeapObjectTag));
+        // Continue with JSObject being successfully allocated.
+        // a1: constructor function
+        // a3: new target
+        // t4: JSObject
+        __ jmp(&allocated);
+      }
 
-      // Continue with JSObject being successfully allocated.
+      // Allocate the new receiver object using the runtime call.
+      // a1: constructor function
+      // a3: new target
+      __ bind(&rt_call);
+
+      // Push the constructor and new_target twice, second pair as arguments
+      // to the runtime call.
+      __ Push(a1, a3, a1, a3);  // constructor function, new target
+      __ CallRuntime(Runtime::kNewObject);
+      __ mov(t4, v0);
+      __ Pop(a1, a3);
 
-      // Continue with JSObject being successfully allocated.
+      // Receiver for constructor call allocated.
+      // a1: constructor function
+      // a3: new target
       // t4: JSObject
-      __ jmp(&allocated);
+      __ bind(&allocated);
 
-      // Reload the original constructor and fall-through.
-      __ bind(&rt_call_reload_new_target);
-      __ lw(a3, MemOperand(sp, 0 * kPointerSize));
+      // Retrieve smi-tagged arguments count from the stack.
+      __ lw(a0, MemOperand(sp));
     }
 
-    // Allocate the new receiver object using the runtime call.
-    // a1: constructor function
-    // a3: original constructor
-    __ bind(&rt_call);
-
-    __ Push(a1, a3);  // constructor function, original constructor
-    __ CallRuntime(Runtime::kNewObject, 2);
-    __ mov(t4, v0);
-
-    // Receiver for constructor call allocated.
-    // t4: JSObject
-    __ bind(&allocated);
-
-    // Restore the parameters.
-    __ Pop(a3);  // new.target
-    __ Pop(a1);
-
-    // Retrieve smi-tagged arguments count from the stack.
-    __ lw(a0, MemOperand(sp));
     __ SmiUntag(a0);
 
-    __ Push(a3, t4, t4);
+    if (create_implicit_receiver) {
+      // Push the allocated receiver to the stack. We need two copies
+      // because we may have to return the original one and the calling
+      // conventions dictate that the called function pops the receiver.
+      __ Push(t4, t4);
+    } else {
+      __ PushRoot(Heap::kTheHoleValueRootIndex);
+    }
 
     // Set up pointer to last argument.
     __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -556,26 +608,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // a0: number of arguments
    // a1: constructor function
     // a2: address of last argument (caller sp)
-    // a3: number of arguments (smi-tagged)
+    // a3: new target
+    // t4: number of arguments (smi-tagged)
     // sp[0]: receiver
     // sp[1]: receiver
-    // sp[2]: new.target
-    // sp[3]: number of arguments (smi-tagged)
+    // sp[2]: number of arguments (smi-tagged)
     Label loop, entry;
-    __ SmiTag(a3, a0);
+    __ SmiTag(t4, a0);
     __ jmp(&entry);
     __ bind(&loop);
-    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+    __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
     __ Addu(t0, a2, Operand(t0));
     __ lw(t1, MemOperand(t0));
     __ push(t1);
     __ bind(&entry);
-    __ Addu(a3, a3, Operand(-2));
-    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+    __ Addu(t4, t4, Operand(-2));
+    __ Branch(&loop, greater_equal, t4, Operand(zero_reg));
 
     // Call the function.
     // a0: number of arguments
     // a1: constructor function
+    // a3: new target
     if (is_api_function) {
       __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
       Handle<Code> code =
@@ -583,47 +636,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ Call(code, RelocInfo::CODE_TARGET);
     } else {
       ParameterCount actual(a0);
-      __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+      __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+                        CheckDebugStepCallWrapper());
     }
 
     // Store offset of return address for deoptimizer.
-    if (!is_api_function) {
+    if (create_implicit_receiver && !is_api_function) {
      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
     }
 
     // Restore context from the frame.
     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // If the result is an object (in the ECMA sense), we should get rid
-    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-    // on page 74.
-    Label use_receiver, exit;
-
-    // If the result is a smi, it is *not* an object in the ECMA sense.
-    // v0: result
-    // sp[0]: receiver (newly allocated object)
-    // sp[1]: new.target
-    // sp[2]: number of arguments (smi-tagged)
-    __ JumpIfSmi(v0, &use_receiver);
-
-    // If the type of the result (stored in its map) is less than
-    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-    __ GetObjectType(v0, a1, a3);
-    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-    // Throw away the result of the constructor invocation and use the
-    // on-stack receiver as the result.
-    __ bind(&use_receiver);
-    __ lw(v0, MemOperand(sp));
-
-    // Remove receiver from the stack, remove caller arguments, and
-    // return.
-    __ bind(&exit);
-    // v0: result
-    // sp[0]: receiver (newly allocated object)
-    // sp[1]: new.target (original constructor)
-    // sp[2]: number of arguments (smi-tagged)
-    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+    if (create_implicit_receiver) {
+      // If the result is an object (in the ECMA sense), we should get rid
+      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+      // on page 74.
+      Label use_receiver, exit;
+
+      // If the result is a smi, it is *not* an object in the ECMA sense.
+      // v0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ JumpIfSmi(v0, &use_receiver);
+
+      // If the type of the result (stored in its map) is less than
+      // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+      __ GetObjectType(v0, a1, a3);
+      __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+      // Throw away the result of the constructor invocation and use the
+      // on-stack receiver as the result.
+      __ bind(&use_receiver);
+      __ lw(v0, MemOperand(sp));
+
+      // Remove receiver from the stack, remove caller arguments, and
+      // return.
+      __ bind(&exit);
+      // v0: result
+      // sp[0]: receiver (newly allocated object)
+      // sp[1]: number of arguments (smi-tagged)
+      __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+    } else {
+      __ lw(a1, MemOperand(sp));
+    }
 
     // Leave construct frame.
   }
 
@@ -631,104 +687,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   __ sll(t0, a1, kPointerSizeLog2 - 1);
   __ Addu(sp, sp, t0);
   __ Addu(sp, sp, kPointerSize);
-  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  if (create_implicit_receiver) {
+    __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  }
   __ Ret();
 }
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false);
+  Generate_JSConstructStubHelper(masm, false, true);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true);
+  Generate_JSConstructStubHelper(masm, true, true);
 }
 
 
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0     : number of arguments
-  //  -- a1     : constructor function
-  //  -- a2     : allocation site or undefined
-  //  -- a3     : original constructor
-  //  -- ra     : return address
-  //  -- sp[...]: constructor arguments
-  // -----------------------------------
-
-  {
-    FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
-    __ AssertUndefinedOrAllocationSite(a2, t0);
-    __ push(a2);
-
-    __ mov(t0, a0);
-    __ SmiTag(t0);
-    __ push(t0);  // Smi-tagged arguments count.
-
-    // Push new.target.
-    __ push(a3);
-
-    // receiver is the hole.
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ push(at);
-
-    // Set up pointer to last argument.
-    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-    // Copy arguments and receiver to the expression stack.
-    // a0: number of arguments
-    // a1: constructor function
-    // a2: address of last argument (caller sp)
-    // t0: number of arguments (smi-tagged)
-    // sp[0]: receiver
-    // sp[1]: new.target
-    // sp[2]: number of arguments (smi-tagged)
-    Label loop, entry;
-    __ Branch(&entry);
-    __ bind(&loop);
-    __ sll(at, t0, kPointerSizeLog2 - 1);
-    __ Addu(at, a2, Operand(at));
-    __ lw(at, MemOperand(at));
-    __ push(at);
-    __ bind(&entry);
-    __ Subu(t0, t0, Operand(2));
-    __ Branch(&loop, ge, t0, Operand(zero_reg));
-
-    // Handle step in.
-    Label skip_step_in;
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ li(a2, Operand(debug_step_in_fp));
-    __ lw(a2, MemOperand(a2));
-    __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
-
-    __ Push(a0, a1, a1);
-    __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
-    __ Pop(a0, a1);
-
-    __ bind(&skip_step_in);
-
-    // Call the function.
-    // a0: number of arguments
-    // a1: constructor function
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
-
-    // Restore context from the frame.
-    // v0: result
-    // sp[0]: new.target
-    // sp[1]: number of arguments (smi-tagged)
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    __ lw(a1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false);
+}
 
-    // Leave construct frame.
-  }
 
-  __ sll(at, a1, kPointerSizeLog2 - 1);
-  __ Addu(sp, sp, Operand(at));
-  __ Addu(sp, sp, Operand(kPointerSize));
-  __ Jump(ra);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  __ Push(a1);
+  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
 }
 
 
@@ -744,7 +728,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   Label okay;
   __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
   // Make a2 the space we have left. The stack might already be overflowed
-  // here which will cause r2 to become negative.
+  // here which will cause a2 to become negative.
   __ Subu(a2, sp, a2);
   // Check if the arguments will overflow the stack.
   if (argc_is_tagged == kArgcIsSmiTagged) {
@@ -757,7 +741,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   __ Branch(&okay, gt, a2, Operand(t3));
 
   // Out of stack space.
-  __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+  __ CallRuntime(Runtime::kThrowStackOverflow);
 
   __ bind(&okay);
 }
@@ -861,6 +845,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
 //
 // The live registers are:
 //   o a1: the JS function object being called.
+//   o a3: the new target
 //   o cp: our context
 //   o fp: the caller's frame pointer
 //   o sp: stack pointer
@@ -878,6 +863,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Push(ra, fp, cp, a1);
   __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ Push(a3);
+
+  // Push zero for bytecode array offset.
+  __ Push(zero_reg);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
@@ -906,7 +895,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     __ Subu(t1, sp, Operand(t0));
     __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
     __ Branch(&ok, hs, t1, Operand(a2));
-    __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+    __ CallRuntime(Runtime::kThrowStackOverflow);
     __ bind(&ok);
 
     // If ok, push undefined as the initial value for all register file entries.
@@ -936,16 +925,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(&ok, hs, sp, Operand(at));
     __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard);
     __ pop(kInterpreterBytecodeArrayRegister);
     __ bind(&ok);
   }
 
   // Load bytecode offset and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ Subu(
-      kInterpreterRegisterFileRegister, fp,
-      Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ Addu(kInterpreterRegisterFileRegister, fp,
+          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ li(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ LoadRoot(kInterpreterDispatchTableRegister,
@@ -1021,7 +1009,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : argument count (not including receiver)
-  //  -- a3 : original constructor
+  //  -- a3 : new target
   //  -- a1 : constructor to call
   //  -- a2 : address of the first argument
   // -----------------------------------
@@ -1044,45 +1032,114 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   __ Branch(&loop_header, gt, a2, Operand(t0));
 
   // Call the constructor with a0, a1, and a3 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
 
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(kInterpreterAccumulatorRegister);  // Save accumulator register.
+
+    // Pass the deoptimization type to the runtime system.
+    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+    __ pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use this for interpreter deopts).
+  __ Drop(1);
+
+  // Initialize register file register and dispatch table register.
+  __ Addu(kInterpreterRegisterFileRegister, fp,
+          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  __ LoadRoot(kInterpreterDispatchTableRegister,
+              Heap::kInterpreterTableRootIndex);
+  __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // Get the context from the frame.
+  // TODO(rmcilroy): Update interpreter frame to expect current context at the
+  // context slot instead of the function context.
+  __ lw(kContextRegister,
+        MemOperand(kInterpreterRegisterFileRegister,
+                   InterpreterFrameConstants::kContextFromRegisterPointer));
+
+  // Get the bytecode array pointer from the frame.
+  __ lw(a1,
+        MemOperand(kInterpreterRegisterFileRegister,
+                   InterpreterFrameConstants::kFunctionFromRegisterPointer));
+  __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+
+  if (FLAG_debug_code) {
+    // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, at); + __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at, + Operand(zero_reg)); + __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); + __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1, + Operand(BYTECODE_ARRAY_TYPE)); + } + + // Get the target bytecode offset from the frame. + __ lw(kInterpreterBytecodeOffsetRegister, + MemOperand( + kInterpreterRegisterFileRegister, + InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + + // Dispatch to the target bytecode. + __ Addu(a1, kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister); + __ lbu(a1, MemOperand(a1)); + __ sll(a1, a1, kPointerSizeLog2); + __ Addu(a1, kInterpreterDispatchTableRegister, a1); + __ lw(a1, MemOperand(a1)); + __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(a1); } -static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) { - FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function onto the stack. - // Push function as parameter to the runtime call. - __ Push(a1, a1); - // Whether to compile in a background thread. - __ LoadRoot( - at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex); - __ push(at); +void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) { + Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); +} + + +void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) { + Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); +} + + +void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) { + Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); +} + - __ CallRuntime(Runtime::kCompileOptimized, 2); - // Restore receiver. 
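// For reference, a minimal C++ sketch of the dispatch sequence re-established
// above, under assumed types and constants (the Code::kHeaderSize and
// kHeapObjectTag values here are placeholders): the byte at the current
// bytecode offset indexes a table of code objects, and control jumps past the
// code object's header to the handler body.
#include <cstdint>
using HandlerEntry = const uint8_t*;
constexpr int kIllustrativeCodeHeaderSize = 64;  // stands in for Code::kHeaderSize
constexpr int kIllustrativeHeapObjectTag = 1;    // V8 tags heap pointers
HandlerEntry DispatchTarget(const uint8_t* bytecode_array, int offset,
                            HandlerEntry* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];     // lbu a1, [array + offset]
  HandlerEntry code = dispatch_table[bytecode];  // lw, index scaled to words
  return code + kIllustrativeCodeHeaderSize - kIllustrativeHeapObjectTag;
}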
- __ Pop(a1); +void Builtins::Generate_CompileLazy(MacroAssembler* masm) { + CallRuntimePassFunction(masm, Runtime::kCompileLazy); + GenerateTailCallToReturnedCode(masm); } void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { - CallCompileOptimized(masm, false); + CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); GenerateTailCallToReturnedCode(masm); } void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { - CallCompileOptimized(masm, true); + CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent); GenerateTailCallToReturnedCode(masm); } - static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // For now, we are relying on the fact that make_code_young doesn't do any // garbage collection which allows us to save/restore the registers without @@ -1098,8 +1155,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // the runtime: // a0 - contains return address (beginning of patch sequence) // a1 - isolate + // a3 - new target RegList saved_regs = - (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); + (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit(); FrameScope scope(masm, StackFrame::MANUAL); __ MultiPush(saved_regs); __ PrepareCallCFunction(2, 0, a2); @@ -1137,8 +1195,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { // the runtime: // a0 - contains return address (beginning of patch sequence) // a1 - isolate + // a3 - new target RegList saved_regs = - (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); + (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit(); FrameScope scope(masm, StackFrame::MANUAL); __ MultiPush(saved_regs); __ PrepareCallCFunction(2, 0, a2); @@ -1178,7 +1237,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, // registers. __ MultiPush(kJSCallerSaved | kCalleeSaved); // Pass the function and deoptimization type to the runtime system. - __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); + __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles); __ MultiPop(kJSCallerSaved | kCalleeSaved); } @@ -1204,7 +1263,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, // Pass the function and deoptimization type to the runtime system. __ li(a0, Operand(Smi::FromInt(static_cast<int>(type)))); __ push(a0); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + __ CallRuntime(Runtime::kNotifyDeoptimized); } // Get the full codegen state from the stack and untag it -> t2. @@ -1246,6 +1305,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { } +// Clobbers {t2, t3, t4, t5}. +static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver, + Register function_template_info, + Label* receiver_check_failed) { + Register signature = t2; + Register map = t3; + Register constructor = t4; + Register scratch = t5; + + // If there is no signature, return the holder. + __ lw(signature, FieldMemOperand(function_template_info, + FunctionTemplateInfo::kSignatureOffset)); + Label receiver_check_passed; + __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex, + &receiver_check_passed); + + // Walk the prototype chain. + __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + Label prototype_loop_start; + __ bind(&prototype_loop_start); + + // Get the constructor, if any. 
+ __ GetMapConstructor(constructor, map, scratch, scratch); + Label next_prototype; + __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE)); + Register type = constructor; + __ lw(type, + FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset)); + __ lw(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset)); + + // Loop through the chain of inheriting function templates. + Label function_template_loop; + __ bind(&function_template_loop); + + // If the signatures match, we have a compatible receiver. + __ Branch(&receiver_check_passed, eq, signature, Operand(type), + USE_DELAY_SLOT); + + // If the current type is not a FunctionTemplateInfo, load the next prototype + // in the chain. + __ JumpIfSmi(type, &next_prototype); + __ GetObjectType(type, scratch, scratch); + __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE)); + + // Otherwise load the parent function template and iterate. + __ lw(type, + FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset)); + __ Branch(&function_template_loop); + + // Load the next prototype and iterate. + __ bind(&next_prototype); + __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset)); + // End if the prototype is null or not hidden. + __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed); + __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset)); + __ DecodeField<Map::IsHiddenPrototype>(scratch); + __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg)); + + __ Branch(&prototype_loop_start); + + __ bind(&receiver_check_passed); +} + + +void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : number of arguments excluding receiver + // -- a1 : callee + // -- ra : return address + // -- sp[0] : last argument + // -- ... + // -- sp[4 * (argc - 1)] : first argument + // -- sp[4 * argc] : receiver + // ----------------------------------- + + // Load the FunctionTemplateInfo. + __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset)); + + // Do the compatible receiver check. + Label receiver_check_failed; + __ sll(at, a0, kPointerSizeLog2); + __ Addu(t8, sp, at); + __ lw(t0, MemOperand(t8)); + CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed); + + // Get the callback offset from the FunctionTemplateInfo, and jump to the + // beginning of the code. + __ lw(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset)); + __ lw(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset)); + __ Addu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(t2); + + // Compatible receiver check failed: throw an Illegal Invocation exception. + __ bind(&receiver_check_failed); + // Drop the arguments (including the receiver); + __ Addu(t8, t8, Operand(kPointerSize)); + __ addu(sp, t8, zero_reg); + __ TailCallRuntime(Runtime::kThrowIllegalInvocation); +} + + void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -1253,7 +1415,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::INTERNAL); // Pass function as argument. 
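// A hedged restatement of CompatibleReceiverCheck above, with hypothetical
// structs standing in for V8's heap layout: walk the receiver's chain of
// hidden prototypes, and at each step walk the constructor's chain of parent
// function templates looking for the required signature.
struct TemplateLike { const void* signature_tag; const TemplateLike* parent; };
struct MapLike {
  const struct ObjectLike* prototype;        // null terminates the chain
  const TemplateLike* constructor_template;  // null unless the ctor is a JSFunction
  bool is_hidden_prototype;                  // only hidden prototypes are traversed
};
struct ObjectLike { const MapLike* map; };
bool IsCompatibleReceiver(const ObjectLike* receiver, const void* signature) {
  if (signature == nullptr) return true;  // no signature: any receiver passes
  for (const ObjectLike* obj = receiver;;) {
    const MapLike* map = obj->map;
    for (const TemplateLike* t = map->constructor_template; t != nullptr;
         t = t->parent) {
      if (t->signature_tag == signature) return true;  // signatures match
    }
    obj = map->prototype;
    if (obj == nullptr) return false;              // end of the prototype chain
    if (!obj->map->is_hidden_prototype) return false;  // ordinary prototype: stop
  }
}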
__ push(a0); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + __ CallRuntime(Runtime::kCompileForOnStackReplacement); } // If the code object is null, just return to the unoptimized code. @@ -1286,7 +1448,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { __ Branch(&ok, hs, sp, Operand(at)); { FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kStackGuard, 0); + __ CallRuntime(Runtime::kStackGuard); } __ Jump(masm->isolate()->builtins()->OnStackReplacement(), RelocInfo::CODE_TARGET); @@ -1297,7 +1459,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { // static -void Builtins::Generate_FunctionCall(MacroAssembler* masm) { +void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm, + int field_index) { + // ----------- S t a t e ------------- + // -- sp[0] : receiver + // ----------------------------------- + + // 1. Pop receiver into a0 and check that it's actually a JSDate object. + Label receiver_not_date; + { + __ Pop(a0); + __ JumpIfSmi(a0, &receiver_not_date); + __ GetObjectType(a0, t0, t0); + __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE)); + } + + // 2. Load the specified date field, falling back to the runtime as necessary. + if (field_index == JSDate::kDateValue) { + __ Ret(USE_DELAY_SLOT); + __ lw(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot. + } else { + if (field_index < JSDate::kFirstUncachedField) { + Label stamp_mismatch; + __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate()))); + __ lw(a1, MemOperand(a1)); + __ lw(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset)); + __ Branch(&stamp_mismatch, ne, t0, Operand(a1)); + __ Ret(USE_DELAY_SLOT); + __ lw(v0, FieldMemOperand( + a0, JSDate::kValueOffset + + field_index * kPointerSize)); // In delay slot. + __ bind(&stamp_mismatch); + } + FrameScope scope(masm, StackFrame::INTERNAL); + __ PrepareCallCFunction(2, t0); + __ li(a1, Operand(Smi::FromInt(field_index))); + __ CallCFunction( + ExternalReference::get_date_field_function(masm->isolate()), 2); + } + __ Ret(); + + // 3. Raise a TypeError if the receiver is not a date. + __ bind(&receiver_not_date); + __ TailCallRuntime(Runtime::kThrowNotDateError); +} + + +// static +void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- sp[0] : argArray + // -- sp[4] : thisArg + // -- sp[8] : receiver + // ----------------------------------- + + // 1. Load receiver into a1, argArray into a0 (if present), remove all + // arguments from the stack (including the receiver), and push thisArg (if + // present) instead. + { + Label no_arg; + Register scratch = t0; + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ mov(a3, a2); + __ sll(scratch, a0, kPointerSizeLog2); + __ Addu(a0, sp, Operand(scratch)); + __ lw(a1, MemOperand(a0)); // receiver + __ Subu(a0, a0, Operand(kPointerSize)); + __ Branch(&no_arg, lt, a0, Operand(sp)); + __ lw(a2, MemOperand(a0)); // thisArg + __ Subu(a0, a0, Operand(kPointerSize)); + __ Branch(&no_arg, lt, a0, Operand(sp)); + __ lw(a3, MemOperand(a0)); // argArray + __ bind(&no_arg); + __ Addu(sp, sp, Operand(scratch)); + __ sw(a2, MemOperand(sp)); + __ mov(a0, a3); + } + + // ----------- S t a t e ------------- + // -- a0 : argArray + // -- a1 : receiver + // -- sp[0] : thisArg + // ----------------------------------- + + // 2. Make sure the receiver is actually callable. 
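// For reference, the JSDate fast path generated in
// Generate_DatePrototype_GetField above, sketched with assumed (non-V8)
// types: local-time fields are cached on the date object and validated
// against a per-isolate stamp that changes whenever the date cache is
// invalidated. The field boundary below is illustrative.
struct DateLike {
  double value;       // the primitive time value; always valid
  int cache_stamp;    // compared against the isolate's current stamp
  double fields[10];  // cached local-time fields (year, month, day, ...)
};
double GetDateField(const DateLike& date, int field_index, int isolate_stamp,
                    double (*runtime_get_field)(const DateLike&, int)) {
  if (field_index == 0) return date.value;  // kDateValue needs no cache check
  const int kFirstUncachedFieldIndex = 8;   // illustrative boundary
  if (field_index < kFirstUncachedFieldIndex &&
      date.cache_stamp == isolate_stamp) {
    return date.fields[field_index];  // cache hit: no C++ call needed
  }
  return runtime_get_field(date, field_index);  // stamp mismatch or uncached
}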
+ Label receiver_not_callable; + __ JumpIfSmi(a1, &receiver_not_callable); + __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset)); + __ And(t0, t0, Operand(1 << Map::kIsCallable)); + __ Branch(&receiver_not_callable, eq, t0, Operand(zero_reg)); + + // 3. Tail call with no arguments if argArray is null or undefined. + Label no_arguments; + __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments); + __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments); + + // 4a. Apply the receiver to the given argArray (passing undefined for + // new.target). + __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); + __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET); + + // 4b. The argArray is either null or undefined, so we tail call without any + // arguments to the receiver. + __ bind(&no_arguments); + { + __ mov(a0, zero_reg); + __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + } + + // 4c. The receiver is not callable, throw an appropriate TypeError. + __ bind(&receiver_not_callable); + { + __ sw(a1, MemOperand(sp)); + __ TailCallRuntime(Runtime::kThrowApplyNonFunction); + } +} + + +// static +void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // 1. Make sure we have at least one argument. // a0: actual number of arguments { @@ -1341,189 +1623,145 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { } -static void Generate_PushAppliedArguments(MacroAssembler* masm, - const int vectorOffset, - const int argumentsOffset, - const int indexOffset, - const int limitOffset) { - Label entry, loop; - Register receiver = LoadDescriptor::ReceiverRegister(); - Register key = LoadDescriptor::NameRegister(); - Register slot = LoadDescriptor::SlotRegister(); - Register vector = LoadWithVectorDescriptor::VectorRegister(); - - __ lw(key, MemOperand(fp, indexOffset)); - __ Branch(&entry); - - // Load the current argument from the arguments array. - __ bind(&loop); - __ lw(receiver, MemOperand(fp, argumentsOffset)); - - // Use inline caching to speed up access to arguments. - int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex(); - __ li(slot, Operand(Smi::FromInt(slot_index))); - __ lw(vector, MemOperand(fp, vectorOffset)); - Handle<Code> ic = - KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode(); - __ Call(ic, RelocInfo::CODE_TARGET); - - __ push(v0); - - // Use inline caching to access the arguments. - __ lw(key, MemOperand(fp, indexOffset)); - __ Addu(key, key, Operand(1 << kSmiTagSize)); - __ sw(key, MemOperand(fp, indexOffset)); - - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ lw(a1, MemOperand(fp, limitOffset)); - __ Branch(&loop, ne, key, Operand(a1)); - - // On exit, the pushed arguments count is in a0, untagged - __ mov(a0, key); - __ SmiUntag(a0); -} - - -// Used by FunctionApply and ReflectApply -static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) { - const int kFormalParameters = targetIsArgument ? 3 : 2; - const int kStackSize = kFormalParameters + 1; +void Builtins::Generate_ReflectApply(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- sp[0] : argumentsList + // -- sp[4] : thisArgument + // -- sp[8] : target + // -- sp[12] : receiver + // ----------------------------------- + // 1. 
Load target into a1 (if present), argumentsList into a0 (if present), + // remove all arguments from the stack (including the receiver), and push + // thisArgument (if present) instead. { - FrameScope frame_scope(masm, StackFrame::INTERNAL); - const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize; - const int kReceiverOffset = kArgumentsOffset + kPointerSize; - const int kFunctionOffset = kReceiverOffset + kPointerSize; - const int kVectorOffset = - InternalFrameConstants::kCodeOffset - 1 * kPointerSize; - - // Push the vector. - __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset)); - __ Push(a1); - - __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function. - __ lw(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array. - __ Push(a0, a1); - // Returns (in v0) number of arguments to copy to stack as Smi. - if (targetIsArgument) { - __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX, - CALL_FUNCTION); - } else { - __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION); - } - - // Returns the result in v0. - Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged); - - // Push current limit and index. - const int kIndexOffset = kVectorOffset - (2 * kPointerSize); - const int kLimitOffset = kVectorOffset - (1 * kPointerSize); - __ mov(a1, zero_reg); - __ lw(a2, MemOperand(fp, kReceiverOffset)); - __ Push(v0, a1, a2); // limit, initial index and receiver. - - // Copy all arguments from the array to the stack. - Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset, - kIndexOffset, kLimitOffset); - - // Call the callable. - // TODO(bmeurer): This should be a tail call according to ES6. - __ lw(a1, MemOperand(fp, kFunctionOffset)); - __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); - - // Tear down the internal frame and remove function, receiver and args. + Label no_arg; + Register scratch = t0; + __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); + __ mov(a2, a1); + __ mov(a3, a1); + __ sll(scratch, a0, kPointerSizeLog2); + __ mov(a0, scratch); + __ Subu(a0, a0, Operand(kPointerSize)); + __ Branch(&no_arg, lt, a0, Operand(zero_reg)); + __ Addu(a0, sp, Operand(a0)); + __ lw(a1, MemOperand(a0)); // target + __ Subu(a0, a0, Operand(kPointerSize)); + __ Branch(&no_arg, lt, a0, Operand(sp)); + __ lw(a2, MemOperand(a0)); // thisArgument + __ Subu(a0, a0, Operand(kPointerSize)); + __ Branch(&no_arg, lt, a0, Operand(sp)); + __ lw(a3, MemOperand(a0)); // argumentsList + __ bind(&no_arg); + __ Addu(sp, sp, Operand(scratch)); + __ sw(a2, MemOperand(sp)); + __ mov(a0, a3); } - __ Ret(USE_DELAY_SLOT); - __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot. -} - - -static void Generate_ConstructHelper(MacroAssembler* masm) { - const int kFormalParameters = 3; - const int kStackSize = kFormalParameters + 1; + // ----------- S t a t e ------------- + // -- a0 : argumentsList + // -- a1 : target + // -- sp[0] : thisArgument + // ----------------------------------- + // 2. Make sure the target is actually callable. + Label target_not_callable; + __ JumpIfSmi(a1, &target_not_callable); + __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset)); + __ And(t0, t0, Operand(1 << Map::kIsCallable)); + __ Branch(&target_not_callable, eq, t0, Operand(zero_reg)); + + // 3a. Apply the target to the given argumentsList (passing undefined for + // new.target). 
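// The argument shuffle above (Function.prototype.apply and Reflect.construct
// use the same pattern) reads up to three optional stack slots with undefined
// defaults, then pops everything and leaves thisArgument as the receiver.
// A hedged sketch, modeling sp as a pointer to the lowest argument slot:
#include <cstdint>
void LoadOptionalArgs(const intptr_t* sp, int argc, intptr_t undefined,
                      intptr_t* target, intptr_t* this_arg, intptr_t* args_list) {
  *target = *this_arg = *args_list = undefined;  // defaults when absent
  const intptr_t* cursor = sp + argc - 1;        // highest slot first
  if (cursor < sp) return;
  *target = *cursor--;    // lw a1 // target
  if (cursor < sp) return;
  *this_arg = *cursor--;  // lw a2 // thisArgument
  if (cursor < sp) return;
  *args_list = *cursor;   // lw a3 // argumentsList
}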
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); + __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET); + + // 3b. The target is not callable, throw an appropriate TypeError. + __ bind(&target_not_callable); { - FrameScope frame_scope(masm, StackFrame::INTERNAL); - const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize; - const int kArgumentsOffset = kNewTargetOffset + kPointerSize; - const int kFunctionOffset = kArgumentsOffset + kPointerSize; - const int kVectorOffset = - InternalFrameConstants::kCodeOffset - 1 * kPointerSize; - - // Push the vector. - __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset)); - __ Push(a1); - - // If newTarget is not supplied, set it to constructor - Label validate_arguments; - __ lw(a0, MemOperand(fp, kNewTargetOffset)); - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - __ Branch(&validate_arguments, ne, a0, Operand(at)); - __ lw(a0, MemOperand(fp, kFunctionOffset)); - __ sw(a0, MemOperand(fp, kNewTargetOffset)); - - // Validate arguments - __ bind(&validate_arguments); - __ lw(a0, MemOperand(fp, kFunctionOffset)); // get the function - __ push(a0); - __ lw(a0, MemOperand(fp, kArgumentsOffset)); // get the args array - __ push(a0); - __ lw(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target - __ push(a0); - // Returns argument count in v0. - __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX, - CALL_FUNCTION); - - // Returns result in v0. - Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged); - - // Push current limit and index. - const int kIndexOffset = kVectorOffset - (2 * kPointerSize); - const int kLimitOffset = kVectorOffset - (1 * kPointerSize); - __ push(v0); // limit - __ mov(a1, zero_reg); // initial index - __ push(a1); - // Push the constructor function as callee. - __ lw(a0, MemOperand(fp, kFunctionOffset)); - __ push(a0); - - // Copy all arguments from the array to the stack. - Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset, - kIndexOffset, kLimitOffset); - - // Use undefined feedback vector - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - __ lw(a1, MemOperand(fp, kFunctionOffset)); - __ lw(t0, MemOperand(fp, kNewTargetOffset)); - - // Call the function. - CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL); - __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); - - // Leave internal frame. + __ sw(a1, MemOperand(sp)); + __ TailCallRuntime(Runtime::kThrowApplyNonFunction); } - __ jr(ra); - __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot. } -void Builtins::Generate_FunctionApply(MacroAssembler* masm) { - Generate_ApplyHelper(masm, false); -} +void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc + // -- sp[0] : new.target (optional) + // -- sp[4] : argumentsList + // -- sp[8] : target + // -- sp[12] : receiver + // ----------------------------------- + // 1. Load target into a1 (if present), argumentsList into a0 (if present), + // new.target into a3 (if present, otherwise use target), remove all + // arguments from the stack (including the receiver), and push thisArgument + // (if present) instead. 
+ {
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ Addu(a0, sp, Operand(scratch));
+ __ sw(a2, MemOperand(a0)); // receiver
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a1, MemOperand(a0)); // target
+ __ mov(a3, a1); // new.target defaults to target
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // argumentsList
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // new.target
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ mov(a0, a2);
+ }

-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}

+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a3 : new.target
+ // -- a1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------

+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(a1, &target_not_constructor);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+ __ Branch(&target_not_constructor, eq, t0, Operand(zero_reg));
+
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+ __ Branch(&new_target_not_constructor, eq, t0, Operand(zero_reg));
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }

-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ sw(a3, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
 }


@@ -1533,6 +1771,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
 // -- a0 : actual number of arguments
 // -- a1 : function (passed through to callee)
 // -- a2 : expected number of arguments
+ // -- a3 : new target (passed through to callee)
 // -----------------------------------
 // Check the stack for overflow. We are not trying to catch
 // interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1575,6 +1814,130 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {


 // static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(a0, &create_runtime);
+
+ // Load the map of argumentsList into a2.
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); + + // Load native context into t0. + __ lw(t0, NativeContextMemOperand()); + + // Check if argumentsList is an (unmodified) arguments object. + __ lw(at, ContextMemOperand(t0, Context::SLOPPY_ARGUMENTS_MAP_INDEX)); + __ Branch(&create_arguments, eq, a2, Operand(at)); + __ lw(at, ContextMemOperand(t0, Context::STRICT_ARGUMENTS_MAP_INDEX)); + __ Branch(&create_arguments, eq, a2, Operand(at)); + + // Check if argumentsList is a fast JSArray. + __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset)); + __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE)); + + // Ask the runtime to create the list (actually a FixedArray). + __ bind(&create_runtime); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1, a3, a0); + __ CallRuntime(Runtime::kCreateListFromArrayLike); + __ mov(a0, v0); + __ Pop(a1, a3); + __ lw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); + __ SmiUntag(a2); + } + __ Branch(&done_create); + + // Try to create the list from an arguments object. + __ bind(&create_arguments); + __ lw(a2, + FieldMemOperand(a0, JSObject::kHeaderSize + + Heap::kArgumentsLengthIndex * kPointerSize)); + __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset)); + __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset)); + __ Branch(&create_runtime, ne, a2, Operand(at)); + __ SmiUntag(a2); + __ mov(a0, t0); + __ Branch(&done_create); + + // Try to create the list from a JSArray object. + __ bind(&create_array); + __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset)); + __ DecodeField<Map::ElementsKindBits>(a2); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS)); + __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS)); + __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset)); + __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset)); + __ SmiUntag(a2); + + __ bind(&done_create); + } + + // Check for stack overflow. + { + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + Label done; + __ LoadRoot(t0, Heap::kRealStackLimitRootIndex); + // Make ip the space we have left. The stack might already be overflowed + // here which will cause ip to become negative. + __ Subu(t0, sp, t0); + // Check if the arguments will overflow the stack. + __ sll(at, a2, kPointerSizeLog2); + __ Branch(&done, gt, t0, Operand(at)); // Signed comparison. + __ TailCallRuntime(Runtime::kThrowStackOverflow); + __ bind(&done); + } + + // ----------- S t a t e ------------- + // -- a1 : target + // -- a0 : args (a FixedArray built from argumentsList) + // -- a2 : len (number of elements to push from args) + // -- a3 : new.target (checked to be constructor or undefined) + // -- sp[0] : thisArgument + // ----------------------------------- + + // Push arguments onto the stack (thisArgument is already on the stack). + { + __ mov(t0, zero_reg); + Label done, loop; + __ bind(&loop); + __ Branch(&done, eq, t0, Operand(a2)); + __ sll(at, t0, kPointerSizeLog2); + __ Addu(at, a0, at); + __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize)); + __ Push(at); + __ Addu(t0, t0, Operand(1)); + __ Branch(&loop); + __ bind(&done); + __ Move(a0, t0); + } + + // Dispatch to Call or Construct depending on whether new.target is undefined. 
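// For reference, the classification performed above when building the
// argument list, restated as hedged C++. The ElementsKind values mirror the
// STATIC_ASSERTs in the code; the remaining parameters are illustrative
// stand-ins for checks the stub performs on the heap object.
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2
};
enum class ListPath { UnmodifiedArguments, FastJSArray, RuntimeCreateList };
ListPath ClassifyArgumentsList(bool is_smi, bool has_arguments_map,
                               bool length_matches_elements, bool is_js_array,
                               int elements_kind) {
  if (is_smi) return ListPath::RuntimeCreateList;
  if (has_arguments_map) {  // sloppy or strict arguments map, unmodified
    return length_matches_elements ? ListPath::UnmodifiedArguments
                                   : ListPath::RuntimeCreateList;
  }
  if (is_js_array && (elements_kind == FAST_SMI_ELEMENTS ||
                      elements_kind == FAST_ELEMENTS)) {
    return ListPath::FastJSArray;  // holey-smi and anything past FAST fall back
  }
  return ListPath::RuntimeCreateList;  // Runtime::kCreateListFromArrayLike
}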
+ { + Label construct; + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&construct, ne, a3, Operand(at)); + __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + __ bind(&construct); + __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET); + } +} + + +// static void Builtins::Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- @@ -1669,21 +2032,117 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); __ sra(a2, a2, kSmiTagSize); // Un-tag. - __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); ParameterCount actual(a0); ParameterCount expected(a2); - __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper()); + __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION, + CheckDebugStepCallWrapper()); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); { FrameScope frame(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0); + __ Push(a1); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); } } // static +void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments (not including the receiver) + // -- a1 : the function to call (checked to be a JSBoundFunction) + // ----------------------------------- + __ AssertBoundFunction(a1); + + // Patch the receiver to [[BoundThis]]. + { + __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); + __ sll(t0, a0, kPointerSizeLog2); + __ addu(t0, t0, sp); + __ sw(at, MemOperand(t0)); + } + + // Load [[BoundArguments]] into a2 and length of that into t0. + __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); + __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ SmiUntag(t0); + + // ----------- S t a t e ------------- + // -- a0 : the number of arguments (not including the receiver) + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a2 : the [[BoundArguments]] (implemented as FixedArray) + // -- t0 : the number of [[BoundArguments]] + // ----------------------------------- + + // Reserve stack space for the [[BoundArguments]]. + { + Label done; + __ sll(t1, t0, kPointerSizeLog2); + __ Subu(sp, sp, Operand(t1)); + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + __ LoadRoot(at, Heap::kRealStackLimitRootIndex); + __ Branch(&done, gt, sp, Operand(at)); // Signed comparison. + // Restore the stack pointer. + __ Addu(sp, sp, Operand(t1)); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + } + __ bind(&done); + } + + // Relocate arguments down the stack. + { + Label loop, done_loop; + __ mov(t1, zero_reg); + __ bind(&loop); + __ Branch(&done_loop, gt, t1, Operand(a0)); + __ sll(t2, t0, kPointerSizeLog2); + __ addu(t2, t2, sp); + __ lw(at, MemOperand(t2)); + __ sll(t2, t1, kPointerSizeLog2); + __ addu(t2, t2, sp); + __ sw(at, MemOperand(t2)); + __ Addu(t0, t0, Operand(1)); + __ Addu(t1, t1, Operand(1)); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Copy [[BoundArguments]] to the stack (below the arguments). 
+ { + Label loop, done_loop; + __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ SmiUntag(t0); + __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + __ Subu(t0, t0, Operand(1)); + __ Branch(&done_loop, lt, t0, Operand(zero_reg)); + __ sll(t1, t0, kPointerSizeLog2); + __ addu(t1, t1, a2); + __ lw(at, MemOperand(t1)); + __ sll(t1, a0, kPointerSizeLog2); + __ addu(t1, t1, sp); + __ sw(at, MemOperand(t1)); + __ Addu(a0, a0, Operand(1)); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Call the [[BoundTargetFunction]] via the Call builtin. + __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny, + masm->isolate()))); + __ lw(at, MemOperand(at)); + __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(at); +} + + +// static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- // -- a0 : the number of arguments (not including the receiver) @@ -1696,13 +2155,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ GetObjectType(a1, t1, t2); __ Jump(masm->isolate()->builtins()->CallFunction(mode), RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); - __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE)); - - // 1. Call to function proxy. - // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies. - __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset)); - __ AssertNotSmi(a1); - __ Branch(&non_smi); + __ Jump(masm->isolate()->builtins()->CallBoundFunction(), + RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE)); + + // 1. Runtime fallback for Proxy [[Call]]. + __ Push(a1); + // Increase the arguments size to include the pushed function and the + // existing receiver on the stack. + __ Addu(a0, a0, 2); + // Tail-call to the runtime. + __ JumpToExternalReference( + ExternalReference(Runtime::kJSProxyCall, masm->isolate())); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). @@ -1716,7 +2180,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ addu(at, sp, at); __ sw(a1, MemOperand(at)); // Let the "call_as_function_delegate" take care of the rest. 
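// What the Generate_CallBoundFunction sequence above does to the stack, as a
// hedged sketch over std::vector: index 0 models the word at sp (the last
// pushed argument) and index argc models the receiver slot.
#include <vector>
template <typename Val>
void PushBoundArguments(std::vector<Val>& stack, int& argc, Val bound_this,
                        const std::vector<Val>& bound_args) {
  stack[argc] = bound_this;  // patch the receiver slot to [[BoundThis]]
  // Slide the bound arguments in just below the receiver so that, in call
  // order, they precede the caller-pushed arguments (bound_args[0] first).
  stack.insert(stack.begin() + argc, bound_args.rbegin(), bound_args.rend());
  argc += static_cast<int>(bound_args.size());
  // The stub then loads [[BoundTargetFunction]] into a1 and tail-calls the
  // Call builtin, after the real-stack-limit check for the extra words.
}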
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1); + __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1); __ Jump(masm->isolate()->builtins()->CallFunction( ConvertReceiverMode::kNotNullOrUndefined), RelocInfo::CODE_TARGET); @@ -1726,7 +2190,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { { FrameScope scope(masm, StackFrame::INTERNAL); __ Push(a1); - __ CallRuntime(Runtime::kThrowCalledNonCallable, 1); + __ CallRuntime(Runtime::kThrowCalledNonCallable); } } @@ -1736,10 +2200,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : the number of arguments (not including the receiver) // -- a1 : the constructor to call (checked to be a JSFunction) - // -- a3 : the original constructor (checked to be a JSFunction) + // -- a3 : the new target (checked to be a constructor) // ----------------------------------- __ AssertFunction(a1); - __ AssertFunction(a3); // Calling convention for function specific ConstructStubs require // a2 to contain either an AllocationSite or undefined. @@ -1755,17 +2218,117 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // static +void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : the number of arguments (not including the receiver) + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a3 : the new target (checked to be a constructor) + // ----------------------------------- + __ AssertBoundFunction(a1); + + // Load [[BoundArguments]] into a2 and length of that into t0. + __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); + __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ SmiUntag(t0); + + // ----------- S t a t e ------------- + // -- a0 : the number of arguments (not including the receiver) + // -- a1 : the function to call (checked to be a JSBoundFunction) + // -- a2 : the [[BoundArguments]] (implemented as FixedArray) + // -- a3 : the new target (checked to be a constructor) + // -- t0 : the number of [[BoundArguments]] + // ----------------------------------- + + // Reserve stack space for the [[BoundArguments]]. + { + Label done; + __ sll(t1, t0, kPointerSizeLog2); + __ Subu(sp, sp, Operand(t1)); + // Check the stack for overflow. We are not trying to catch interruptions + // (i.e. debug break and preemption) here, so check the "real stack limit". + __ LoadRoot(at, Heap::kRealStackLimitRootIndex); + __ Branch(&done, gt, sp, Operand(at)); // Signed comparison. + // Restore the stack pointer. + __ Addu(sp, sp, Operand(t1)); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ EnterFrame(StackFrame::INTERNAL); + __ CallRuntime(Runtime::kThrowStackOverflow); + } + __ bind(&done); + } + + // Relocate arguments down the stack. + { + Label loop, done_loop; + __ mov(t1, zero_reg); + __ bind(&loop); + __ Branch(&done_loop, ge, t1, Operand(a0)); + __ sll(t2, t0, kPointerSizeLog2); + __ addu(t2, t2, sp); + __ lw(at, MemOperand(t2)); + __ sll(t2, t1, kPointerSizeLog2); + __ addu(t2, t2, sp); + __ sw(at, MemOperand(t2)); + __ Addu(t0, t0, Operand(1)); + __ Addu(t1, t1, Operand(1)); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Copy [[BoundArguments]] to the stack (below the arguments). 
+ { + Label loop, done_loop; + __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); + __ SmiUntag(t0); + __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + __ Subu(t0, t0, Operand(1)); + __ Branch(&done_loop, lt, t0, Operand(zero_reg)); + __ sll(t1, t0, kPointerSizeLog2); + __ addu(t1, t1, a2); + __ lw(at, MemOperand(t1)); + __ sll(t1, a0, kPointerSizeLog2); + __ addu(t1, t1, sp); + __ sw(at, MemOperand(t1)); + __ Addu(a0, a0, Operand(1)); + __ Branch(&loop); + __ bind(&done_loop); + } + + // Patch new.target to [[BoundTargetFunction]] if new.target equals target. + { + Label skip_load; + __ Branch(&skip_load, ne, a1, Operand(a3)); + __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ bind(&skip_load); + } + + // Construct the [[BoundTargetFunction]] via the Construct builtin. + __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); + __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate()))); + __ lw(at, MemOperand(at)); + __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(at); +} + + +// static void Builtins::Generate_ConstructProxy(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : the number of arguments (not including the receiver) - // -- a1 : the constructor to call (checked to be a JSFunctionProxy) - // -- a3 : the original constructor (either the same as the constructor or + // -- a1 : the constructor to call (checked to be a JSProxy) + // -- a3 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) // ----------------------------------- - // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies. - __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset)); - __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + // Call into the Runtime for Proxy [[Construct]]. + __ Push(a1, a3); + // Include the pushed new_target, constructor and the receiver. + __ Addu(a0, a0, Operand(3)); + // Tail-call to the runtime. + __ JumpToExternalReference( + ExternalReference(Runtime::kJSProxyConstruct, masm->isolate())); } @@ -1774,24 +2337,33 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : the number of arguments (not including the receiver) // -- a1 : the constructor to call (can be any Object) - // -- a3 : the original constructor (either the same as the constructor or + // -- a3 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) // ----------------------------------- - // Check if target has a [[Construct]] internal method. + // Check if target is a Smi. Label non_constructor; __ JumpIfSmi(a1, &non_constructor); - __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t2, t2, Operand(1 << Map::kIsCallable)); - __ Branch(&non_constructor, eq, t2, Operand(zero_reg)); // Dispatch based on instance type. + __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset)); __ Jump(masm->isolate()->builtins()->ConstructFunction(), RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); + + // Check if target has a [[Construct]] internal method. 
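// The skip_load block above encodes the new.target rule for constructing a
// bound function: only when new.target is the bound function itself (a plain
// `new boundFn(...)`) is it replaced by [[BoundTargetFunction]]; an explicit
// new.target supplied via Reflect.construct is preserved. Hedged sketch:
template <typename Obj>
const Obj* ResolveBoundNewTarget(const Obj* bound_function,
                                 const Obj* new_target,
                                 const Obj* bound_target_function) {
  return (new_target == bound_function) ? bound_target_function : new_target;
}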
+ __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); + __ And(t3, t3, Operand(1 << Map::kIsConstructor)); + __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); + + // Only dispatch to bound functions after checking whether they are + // constructors. + __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(), + RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + + // Only dispatch to proxies after checking whether they are constructors. __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET, - eq, t2, Operand(JS_FUNCTION_PROXY_TYPE)); + eq, t2, Operand(JS_PROXY_TYPE)); // Called Construct on an exotic Object with a [[Construct]] internal method. { @@ -1800,7 +2372,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ addu(at, sp, at); __ sw(a1, MemOperand(at)); // Let the "call_as_constructor_delegate" take care of the rest. - __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1); + __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1); __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET); } @@ -1808,11 +2380,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { // Called Construct on an Object that doesn't have a [[Construct]] internal // method. __ bind(&non_constructor); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); - __ CallRuntime(Runtime::kThrowCalledNonCallable, 1); - } + __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(), + RelocInfo::CODE_TARGET); } @@ -1822,14 +2391,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // -- a0: actual arguments count // -- a1: function (passed through to callee) // -- a2: expected arguments count + // -- a3: new target (passed through to callee) // ----------------------------------- - Label stack_overflow; - ArgumentAdaptorStackCheck(masm, &stack_overflow); - Label invoke, dont_adapt_arguments; + Label invoke, dont_adapt_arguments, stack_overflow; Label enough, too_few; - __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); __ Branch(&dont_adapt_arguments, eq, a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); // We use Uless as the number of argument should always be greater than 0. @@ -1839,9 +2406,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // a0: actual number of arguments as a smi // a1: function // a2: expected number of arguments - // a3: code entry to call + // a3: new target (passed through to callee) __ bind(&enough); EnterArgumentsAdaptorFrame(masm); + ArgumentAdaptorStackCheck(masm, &stack_overflow); // Calculate copy start address into a0 and copy end address into t1. __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize); @@ -1856,7 +2424,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // a0: copy start address // a1: function // a2: expected number of arguments - // a3: code entry to call + // a3: new target (passed through to callee) // t1: copy end address Label copy; @@ -1888,17 +2456,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { { FrameScope frame(masm, StackFrame::MANUAL); EnterArgumentsAdaptorFrame(masm); - __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0); + __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments); } __ bind(&no_strong_error); EnterArgumentsAdaptorFrame(masm); + ArgumentAdaptorStackCheck(masm, &stack_overflow); // Calculate copy start address into a0 and copy end address into t3. 
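// For reference, the net effect of the adaptor paths above on what the callee
// sees, as a hedged sketch: the callee always receives exactly `expected`
// arguments, padded with undefined when too few were passed. Functions marked
// with kDontAdaptArgumentsSentinel skip the adaptor frame entirely, and extra
// actuals remain addressable through the adaptor frame (e.g. for the
// arguments object).
#include <vector>
template <typename Val>
std::vector<Val> AdaptArguments(const std::vector<Val>& actual, size_t expected,
                                Val undefined) {
  std::vector<Val> adapted(expected, undefined);  // the fill-with-undefined path
  for (size_t i = 0; i < expected && i < actual.size(); ++i) {
    adapted[i] = actual[i];  // the copy loops above, collapsed
  }
  return adapted;
}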
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a0, fp, a0);
// Adjust for return address and receiver.
@@ -1910,7 +2479,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// t3: copy end address
Label copy;
__ bind(&copy);
@@ -1923,7 +2492,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(t1, fp, Operand(t2));
@@ -1943,7 +2512,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(a0, a2);
// a0 : expected number of arguments
// a1 : function (passed through to callee)
- __ Call(a3);
+ // a3 : new target (passed through to callee)
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(t0);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1957,13 +2528,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(a3);
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(t0);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ break_(0xCC);
}
}
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 0b536504c2..f88d3bd5b4 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -291,7 +291,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ GetObjectType(a0, t4, t4);
if (cc == less || cc == greater) {
// Call runtime on identical JSObjects.
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -307,7 +307,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -459,12 +459,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); Label first_non_object; // Get the type of the first operand into a2 and compare it with - // FIRST_SPEC_OBJECT_TYPE. + // FIRST_JS_RECEIVER_TYPE. __ GetObjectType(lhs, a2, a2); - __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE)); // Return non-zero. Label return_not_equal; @@ -477,7 +477,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE)); __ GetObjectType(rhs, a3, a3); - __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE)); // Check for oddballs: true, false, null, undefined. __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE)); @@ -539,9 +539,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, __ li(v0, Operand(1)); // Non-zero indicates not equal. __ bind(&object_test); - __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE)); __ GetObjectType(rhs, a2, a3); - __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE)); // If both objects are undetectable, they are equal. Otherwise, they // are not equal, since they are different objects and an object is not @@ -728,8 +728,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { __ Push(lhs, rhs); // Figure out which native to call and setup the arguments. if (cc == eq) { - __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2, - 1); + __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals); } else { int ncr; // NaN compare result. if (cc == lt || cc == le) { @@ -743,9 +742,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. - __ TailCallRuntime( - is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3, - 1); + __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong + : Runtime::kCompare); } __ bind(&miss); @@ -979,7 +977,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { if (exponent_type() == ON_STACK) { // The arguments are still on the stack. __ bind(&call_runtime); - __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); + __ TailCallRuntime(Runtime::kMathPowRT); // The stub is called from non-optimized code, which expects the result // as heap number in exponent. @@ -1472,15 +1470,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) { __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype)); __ Branch(&slow_case, ne, at, Operand(zero_reg)); - // Ensure that {function} is not bound. - Register const shared_info = scratch; - __ lw(shared_info, - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - __ lbu(scratch, - FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset)); - __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte)); - __ Branch(&slow_case, ne, at, Operand(zero_reg)); - // Get the "prototype" (or initial map) of the {function}. 
__ lw(function_prototype, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); @@ -1505,25 +1494,49 @@ void InstanceOfStub::Generate(MacroAssembler* masm) { // Loop through the prototype chain looking for the {function} prototype. // Assume true, and change to false if not found. - Register const object_prototype = object_map; + Register const object_instance_type = function_map; + Register const map_bit_field = function_map; Register const null = scratch; - Label done, loop; - __ LoadRoot(v0, Heap::kTrueValueRootIndex); + Register const result = v0; + + Label done, loop, fast_runtime_fallback; + __ LoadRoot(result, Heap::kTrueValueRootIndex); __ LoadRoot(null, Heap::kNullValueRootIndex); __ bind(&loop); - __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); - __ Branch(&done, eq, object_prototype, Operand(function_prototype)); - __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null)); - __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); - __ LoadRoot(v0, Heap::kFalseValueRootIndex); + + // Check if the object needs to be access checked. + __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset)); + __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded)); + __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg)); + // Check if the current object is a Proxy. + __ lbu(object_instance_type, + FieldMemOperand(object_map, Map::kInstanceTypeOffset)); + __ Branch(&fast_runtime_fallback, eq, object_instance_type, + Operand(JS_PROXY_TYPE)); + + __ lw(object, FieldMemOperand(object_map, Map::kPrototypeOffset)); + __ Branch(&done, eq, object, Operand(function_prototype)); + __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null)); + __ lw(object_map, + FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot. + __ LoadRoot(result, Heap::kFalseValueRootIndex); __ bind(&done); __ Ret(USE_DELAY_SLOT); - __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot. - - // Slow-case: Call the runtime function. + __ StoreRoot(result, + Heap::kInstanceofCacheAnswerRootIndex); // In delay slot. + + // Found Proxy or access check needed: Call the runtime + __ bind(&fast_runtime_fallback); + __ Push(object, function_prototype); + // Invalidate the instanceof cache. + DCHECK(Smi::FromInt(0) == 0); + __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex); + __ TailCallRuntime(Runtime::kHasInPrototypeChain); + + // Slow-case: Call the %InstanceOf runtime function. __ bind(&slow_case); __ Push(object, function); - __ TailCallRuntime(Runtime::kInstanceOf, 2, 1); + __ TailCallRuntime(Runtime::kInstanceOf); } @@ -1594,7 +1607,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // by calling the runtime system. __ bind(&slow); __ push(a1); - __ TailCallRuntime(Runtime::kArguments, 1, 1); + __ TailCallRuntime(Runtime::kArguments); } @@ -1622,7 +1635,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { __ bind(&runtime); __ Push(a1, a3, a2); - __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments); } @@ -1688,7 +1701,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize)); // Do the allocation of all three objects in one go. 
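// A hedged restatement of the rewritten InstanceOf fast path above: walk the
// prototype chain, but bail out to the runtime as soon as a proxy or an
// access-checked object is encountered (the types here are illustrative
// stand-ins; the stub also invalidates the instanceof cache before falling
// back).
enum class InstOfResult { kTrue, kFalse, kRuntimeFallback };
struct ProtoMap {
  const struct ProtoObj* prototype;  // null models the null prototype
  bool needs_access_check;
  bool is_proxy;
};
struct ProtoObj { const ProtoMap* map; };
InstOfResult HasInPrototypeChain(const ProtoObj* object,
                                 const ProtoObj* function_prototype) {
  for (;;) {
    const ProtoMap* map = object->map;
    if (map->needs_access_check || map->is_proxy) {
      return InstOfResult::kRuntimeFallback;  // Runtime::kHasInPrototypeChain
    }
    object = map->prototype;
    if (object == function_prototype) return InstOfResult::kTrue;
    if (object == nullptr) return InstOfResult::kFalse;  // hit null: not found
  }
}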
- __ Allocate(t5, v0, t0, t5, &runtime, TAG_OBJECT); + __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT); // v0 = address of new object(s) (tagged) // a2 = argument count (smi-tagged) @@ -1698,8 +1711,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { const int kAliasedOffset = Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX); - __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset)); + __ lw(t0, NativeContextMemOperand()); Label skip2_ne, skip2_eq; __ Branch(&skip2_ne, ne, t2, Operand(zero_reg)); __ lw(t0, MemOperand(t0, kNormalOffset)); @@ -1837,7 +1849,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { // t1 = argument count (tagged) __ bind(&runtime); __ Push(a1, a3, t1); - __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); + __ TailCallRuntime(Runtime::kNewSloppyArguments); } @@ -1856,7 +1868,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) { __ Push(receiver, key); // Receiver, key. // Perform tail call to the entry. - __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1); + __ TailCallRuntime(Runtime::kLoadElementWithInterceptor); __ bind(&slow); PropertyAccessCompiler::TailCallBuiltin( @@ -1902,10 +1914,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); // Get the arguments boilerplate from the current native context. - __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset)); - __ lw(t0, MemOperand( - t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX))); + __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0); __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset)); __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex); @@ -1953,7 +1962,32 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Do the runtime call to allocate the arguments object. __ bind(&runtime); __ Push(a1, a3, a2); - __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1); + __ TailCallRuntime(Runtime::kNewStrictArguments); +} + + +void RestParamAccessStub::GenerateNew(MacroAssembler* masm) { + // a2 : number of parameters (tagged) + // a3 : parameters pointer + // a1 : rest parameter index (tagged) + // Check if the calling frame is an arguments adaptor frame. + + Label runtime; + __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset)); + __ Branch(&runtime, ne, t1, + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + + // Patch the arguments.length and the parameters pointer. + __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize); + __ Addu(a3, t0, Operand(t1)); + __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ Push(a2, a3, a1); + __ TailCallRuntime(Runtime::kNewRestParam); } @@ -1962,7 +1996,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // time or if regexp entry in generated code is turned off runtime switch or // at compilation. #ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExec); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. 
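// The rest-parameter stub above only patches the argument count and the
// parameters pointer when the caller went through an arguments adaptor
// frame; the rest array itself is built by Runtime::kNewRestParam from
// whatever was passed beyond the rest parameter's index. Hedged sketch of
// that count:
int RestParameterLength(int actual_argc, int rest_index) {
  return actual_argc > rest_index ? actual_argc - rest_index : 0;  // may be 0
}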
@@ -2247,7 +2281,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ Branch(&runtime, eq, v0, Operand(a1)); // For exception, throw the exception again. - __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExecReThrow); __ bind(&failure); // For failure and exception return null. @@ -2343,7 +2377,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Do the runtime call to execute the regexp. __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExec); // Deferred code for string handling. // (6) Not a long external string? If yes, go to (8). @@ -2389,19 +2423,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { } -static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub, - bool is_super) { +static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) { // a0 : number of arguments to the construct function // a2 : feedback vector // a3 : slot in feedback vector (Smi) // a1 : the function to call - // t0 : original constructor (for IsSuperConstructorCall) FrameScope scope(masm, StackFrame::INTERNAL); - const RegList kSavedRegs = 1 << 4 | // a0 - 1 << 5 | // a1 - 1 << 6 | // a2 - 1 << 7 | // a3 - BoolToInt(is_super) << 8; // t0 + const RegList kSavedRegs = 1 << 4 | // a0 + 1 << 5 | // a1 + 1 << 6 | // a2 + 1 << 7; // a3 // Number-of-arguments register must be smi-tagged to call out. __ SmiTag(a0); @@ -2414,7 +2445,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub, } -static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) { +static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a feedback vector slot. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. @@ -2422,7 +2453,6 @@ // a0 : number of arguments to the construct function // a1 : the function to call // a2 : feedback vector // a3 : slot in feedback vector (Smi) - // t0 : original constructor (for IsSuperConstructorCall) Label initialize, done, miss, megamorphic, not_array_function; DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()), @@ -2463,7 +2493,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) { __ Branch(&miss, ne, feedback_map, Operand(at)); // Make sure the function is the Array() function - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2); + __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2); __ Branch(&megamorphic, ne, a1, Operand(t2)); __ jmp(&done); @@ -2485,19 +2515,19 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) { // An uninitialized cache is patched with the function. __ bind(&initialize); // Make sure the function is the Array() function. - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2); + __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2); __ Branch(&not_array_function, ne, a1, Operand(t2)); // The target function is the Array constructor, // Create an AllocationSite if we don't already have it, store it in the // slot.
CreateAllocationSiteStub create_stub(masm->isolate()); - CallStubInRecordCallTarget(masm, &create_stub, is_super); + CallStubInRecordCallTarget(masm, &create_stub); __ Branch(&done); __ bind(&not_array_function); CreateWeakCellStub weak_cell_stub(masm->isolate()); - CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super); + CallStubInRecordCallTarget(masm, &weak_cell_stub); __ bind(&done); } @@ -2507,7 +2537,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) { // a1 : the function to call // a2 : feedback vector // a3 : slot in feedback vector (Smi, for RecordCallTarget) - // t0 : original constructor (for IsSuperConstructorCall) Label non_function; // Check that the function is not a smi. @@ -2516,29 +2545,23 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ GetObjectType(a1, t1, t1); __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE)); - if (RecordCallTarget()) { - GenerateRecordCallTarget(masm, IsSuperConstructorCall()); + GenerateRecordCallTarget(masm); - __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t1, a2, at); - Label feedback_register_initialized; - // Put the AllocationSite from the feedback vector into a2, or undefined. - __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize)); - __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset)); - __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); - __ Branch(&feedback_register_initialized, eq, t1, Operand(at)); - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - __ bind(&feedback_register_initialized); + __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); + __ Addu(t1, a2, at); + Label feedback_register_initialized; + // Put the AllocationSite from the feedback vector into a2, or undefined. + __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize)); + __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&feedback_register_initialized, eq, t1, Operand(at)); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ bind(&feedback_register_initialized); - __ AssertUndefinedOrAllocationSite(a2, t1); - } + __ AssertUndefinedOrAllocationSite(a2, t1); - // Pass function as original constructor. - if (IsSuperConstructorCall()) { - __ mov(a3, t0); - } else { - __ mov(a3, a1); - } + // Pass function as new target. + __ mov(a3, a1); // Tail call to the function-specific construct stub (still in the caller // context at this point).
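Dropping the is_super plumbing leaves GenerateRecordCallTarget with its plain three-state feedback protocol: an uninitialized slot caches the first target (an AllocationSite for Array, a WeakCell otherwise), and any conflicting target degrades the slot permanently. Sketched in C++ with simplified states, not V8's actual representation:

    enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

    SlotState RecordCallTarget(SlotState state, bool same_target_as_cached) {
      switch (state) {
        case SlotState::kUninitialized:
          return SlotState::kMonomorphic;   // cache this target
        case SlotState::kMonomorphic:
          return same_target_as_cached ? SlotState::kMonomorphic
                                       : SlotState::kMegamorphic;
        case SlotState::kMegamorphic:
          return SlotState::kMegamorphic;   // sticky: never re-specializes
      }
      return state;  // unreachable
    }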
@@ -2558,7 +2581,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) { // a3 - slot id // a2 - vector // t0 - loaded from vector[slot] - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at); + __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at); __ Branch(miss, ne, a1, Operand(at)); __ li(a0, Operand(arg_count())); @@ -2581,11 +2604,7 @@ void CallICStub::Generate(MacroAssembler* masm) { // a1 - function // a3 - slot id (Smi) // a2 - vector - const int with_types_offset = - FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex); - const int generic_offset = - FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex); - Label extra_checks_or_miss, call; + Label extra_checks_or_miss, call, call_function; int argc = arg_count(); ParameterCount actual(argc); @@ -2622,9 +2641,11 @@ void CallICStub::Generate(MacroAssembler* masm) { __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); - __ bind(&call); - __ li(a0, Operand(argc)); - __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + __ bind(&call_function); + __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()), + RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), + USE_DELAY_SLOT); + __ li(a0, Operand(argc)); // In delay slot. __ bind(&extra_checks_or_miss); Label uninitialized, miss, not_allocation_site; @@ -2659,14 +2680,12 @@ void CallICStub::Generate(MacroAssembler* masm) { __ Addu(t0, a2, Operand(t0)); __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); - // We have to update statistics for runtime profiling. - __ lw(t0, FieldMemOperand(a2, with_types_offset)); - __ Subu(t0, t0, Operand(Smi::FromInt(1))); - __ sw(t0, FieldMemOperand(a2, with_types_offset)); - __ lw(t0, FieldMemOperand(a2, generic_offset)); - __ Addu(t0, t0, Operand(Smi::FromInt(1))); - __ Branch(USE_DELAY_SLOT, &call); - __ sw(t0, FieldMemOperand(a2, generic_offset)); // In delay slot. + + __ bind(&call); + __ Jump(masm->isolate()->builtins()->Call(convert_mode()), + RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), + USE_DELAY_SLOT); + __ li(a0, Operand(argc)); // In delay slot. __ bind(&uninitialized); @@ -2679,13 +2698,14 @@ void CallICStub::Generate(MacroAssembler* masm) { // Make sure the function is not the Array() function, which requires special // behavior on MISS. - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0); + __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0); __ Branch(&miss, eq, a1, Operand(t0)); - // Update stats. - __ lw(t0, FieldMemOperand(a2, with_types_offset)); - __ Addu(t0, t0, Operand(Smi::FromInt(1))); - __ sw(t0, FieldMemOperand(a2, with_types_offset)); + // Make sure the function belongs to the same native context. + __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset)); + __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX)); + __ lw(t1, NativeContextMemOperand()); + __ Branch(&miss, ne, t0, Operand(t1)); // Initialize the call counter. __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); @@ -2705,7 +2725,7 @@ void CallICStub::Generate(MacroAssembler* masm) { __ Pop(a1); } - __ Branch(&call); + __ Branch(&call_function); // We are here because tracing is on or we encountered a MISS case we can't // handle here. @@ -2723,7 +2743,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) { __ Push(a1, a2, a3); // Call the entry. 
- __ CallRuntime(Runtime::kCallIC_Miss, 3); + __ CallRuntime(Runtime::kCallIC_Miss); // Move result to a1 and exit the internal frame. __ mov(a1, v0); @@ -2791,11 +2811,11 @@ void StringCharCodeAtGenerator::GenerateSlow( __ Push(object_, index_); } if (index_flags_ == STRING_INDEX_IS_NUMBER) { - __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero); } else { DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kNumberToSmi, 1); + __ CallRuntime(Runtime::kNumberToSmi); } // Save the conversion result before the pop instructions below @@ -2823,7 +2843,7 @@ void StringCharCodeAtGenerator::GenerateSlow( call_helper.BeforeCall(masm); __ sll(index_, index_, kSmiTagSize); __ Push(object_, index_); - __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); + __ CallRuntime(Runtime::kStringCharCodeAtRT); __ Move(result_, v0); @@ -2870,7 +2890,7 @@ void StringCharFromCodeGenerator::GenerateSlow( __ bind(&slow_case_); call_helper.BeforeCall(masm); __ push(code_); - __ CallRuntime(Runtime::kStringCharFromCode, 1); + __ CallRuntime(Runtime::kStringCharFromCode); __ Move(result_, v0); call_helper.AfterCall(masm); @@ -3131,7 +3151,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Just jump to runtime to create the sub string. __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubString); __ bind(&single_char); // v0: original string @@ -3176,7 +3196,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) { __ mov(v0, a0); __ bind(&slow_string); __ push(a0); // Push argument. - __ TailCallRuntime(Runtime::kStringToNumber, 1, 1); + __ TailCallRuntime(Runtime::kStringToNumber); __ bind(¬_string); Label not_oddball; @@ -3186,7 +3206,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) { __ bind(¬_oddball); __ push(a0); // Push argument. - __ TailCallRuntime(Runtime::kToNumber, 1, 1); + __ TailCallRuntime(Runtime::kToNumber); } @@ -3203,7 +3223,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) { __ bind(¬_smi); __ push(a0); // Push argument. - __ TailCallRuntime(Runtime::kToLength, 1, 1); + __ TailCallRuntime(Runtime::kToLength); } @@ -3235,7 +3255,7 @@ void ToStringStub::Generate(MacroAssembler* masm) { __ bind(¬_oddball); __ push(a0); // Push argument. 
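A change repeated through this file: TailCallRuntime(Runtime::kFoo, n, 1) becomes TailCallRuntime(Runtime::kFoo). The explicit counts could go because the runtime's function table already records each entry's arity, so the call site no longer supplies it. Roughly (a simplified sketch, not V8's actual tables):

    #include <cstdio>

    struct RuntimeFunction { const char* name; int nargs; };

    // Hypothetical excerpt of the generated runtime table.
    static const RuntimeFunction kRuntimeTable[] = {
        {"StringCompare", 2}, {"SubString", 3}, {"ToNumber", 1},
    };

    void TailCallRuntime(int id) {
      const RuntimeFunction& f = kRuntimeTable[id];
      // The real code loads the arity into a0 and jumps to CEntryStub; the
      // point is only that the count comes from the table, not the caller.
      std::printf("tail call Runtime::k%s with %d args\n", f.name, f.nargs);
    }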
- __ TailCallRuntime(Runtime::kToString, 1, 1); + __ TailCallRuntime(Runtime::kToString); } @@ -3373,7 +3393,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ bind(&runtime); __ Push(a1, a0); - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare); } @@ -3412,7 +3432,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) { __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); if (op() != Token::EQ_STRICT && is_strong(strength())) { - __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1); + __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion); } else { if (!Token::IsEqualityOp(op())) { __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset)); @@ -3705,9 +3725,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) { __ bind(&runtime); __ Push(left, right); if (equality) { - __ TailCallRuntime(Runtime::kStringEquals, 2, 1); + __ TailCallRuntime(Runtime::kStringEquals); } else { - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompare); } __ bind(&miss); @@ -3715,18 +3735,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) { } -void CompareICStub::GenerateObjects(MacroAssembler* masm) { - DCHECK(state() == CompareICState::OBJECT); +void CompareICStub::GenerateReceivers(MacroAssembler* masm) { + DCHECK_EQ(CompareICState::RECEIVER, state()); Label miss; __ And(a2, a1, Operand(a0)); __ JumpIfSmi(a2, &miss); + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); __ GetObjectType(a0, a2, a2); - __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); + __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE)); __ GetObjectType(a1, a2, a2); - __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE)); + __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE)); - DCHECK(GetCondition() == eq); + DCHECK_EQ(eq, GetCondition()); __ Ret(USE_DELAY_SLOT); __ subu(v0, a0, a1); @@ -3735,7 +3756,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) { } -void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) { +void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) { Label miss; Handle<WeakCell> cell = Map::WeakCellForMap(known_map_); __ And(a2, a1, a0); @@ -3750,7 +3771,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) { __ Ret(USE_DELAY_SLOT); __ subu(v0, a0, a1); } else if (is_strong(strength())) { - __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1); + __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion); } else { if (op() == Token::LT || op() == Token::LTE) { __ li(a2, Operand(Smi::FromInt(GREATER))); @@ -3758,7 +3779,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) { __ li(a2, Operand(Smi::FromInt(LESS))); } __ Push(a1, a0, a2); - __ TailCallRuntime(Runtime::kCompare, 3, 1); + __ TailCallRuntime(Runtime::kCompare); } __ bind(&miss); @@ -4246,11 +4267,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( // We need extra registers for this, so we push the object and the address // register temporarily. __ Push(regs_.object(), regs_.address()); - __ EnsureNotWhite(regs_.scratch0(), // The value. - regs_.scratch1(), // Scratch. - regs_.object(), // Scratch. - regs_.address(), // Scratch. - &need_incremental_pop_scratch); + __ JumpIfWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. 
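GenerateReceivers can get away with a single lower-bound comparison per operand because receiver instance types sit at the top of the type range, which is exactly what the new STATIC_ASSERT pins down. Illustrated with made-up type numbers:

    #include <cassert>

    const int FIRST_JS_RECEIVER_TYPE = 100;  // assumed value, illustration only
    const int LAST_TYPE = 120;               // == LAST_JS_RECEIVER_TYPE

    bool IsJSReceiver(int instance_type) {
      assert(instance_type <= LAST_TYPE);
      return instance_type >= FIRST_JS_RECEIVER_TYPE;  // no upper bound needed
    }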
+ &need_incremental_pop_scratch); __ Pop(regs_.object(), regs_.address()); regs_.Restore(masm); @@ -4273,73 +4294,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( } -void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : element value to store - // -- a3 : element index as smi - // -- sp[0] : array literal index in function as smi - // -- sp[4] : array literal - // clobbers a1, a2, t0 - // ----------------------------------- - - Label element_done; - Label double_elements; - Label smi_element; - Label slow_elements; - Label fast_elements; - - // Get array literal index, array literal and its map. - __ lw(t0, MemOperand(sp, 0 * kPointerSize)); - __ lw(a1, MemOperand(sp, 1 * kPointerSize)); - __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset)); - - __ CheckFastElements(a2, t1, &double_elements); - // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements - __ JumpIfSmi(a0, &smi_element); - __ CheckFastSmiElements(a2, t1, &fast_elements); - - // Store into the array literal requires a elements transition. Call into - // the runtime. - __ bind(&slow_elements); - // call. - __ Push(a1, a3, a0); - __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset)); - __ Push(t1, t0); - __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); - - // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. - __ bind(&fast_elements); - __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); - __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t2, t1, t2); - __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ sw(a0, MemOperand(t2, 0)); - // Update the write barrier for the array store. - __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, a0); - - // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, - // and value is Smi. - __ bind(&smi_element); - __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); - __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t2, t1, t2); - __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, a0); - - // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. - __ bind(&double_elements); - __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, a0); -} - - void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { CEntryStub ces(isolate(), 1, kSaveFPRegs); __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); @@ -5071,6 +5025,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ AssertUndefinedOrAllocationSite(a2, t0); } + // Enter the context of the Array function. + __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + Label subclassing; __ Branch(&subclassing, ne, a1, Operand(a3)); @@ -5090,26 +5047,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { // Subclassing. __ bind(&subclassing); - __ Push(a1); - __ Push(a3); - - // Adjust argc. 
switch (argument_count()) { case ANY: case MORE_THAN_ONE: - __ li(at, Operand(2)); + __ sll(at, a0, kPointerSizeLog2); + __ addu(at, sp, at); + __ sw(a1, MemOperand(at)); + __ li(at, Operand(3)); __ addu(a0, a0, at); break; case NONE: - __ li(a0, Operand(2)); + __ sw(a1, MemOperand(sp, 0 * kPointerSize)); + __ li(a0, Operand(3)); break; case ONE: - __ li(a0, Operand(3)); + __ sw(a1, MemOperand(sp, 1 * kPointerSize)); + __ li(a0, Operand(4)); break; } - - __ JumpToExternalReference( - ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate())); + __ Push(a3, a2); + __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); } @@ -5195,14 +5152,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) { // Go up context chain to the script context. for (int i = 0; i < depth(); ++i) { - __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX)); + __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX)); context_reg = result_reg; } // Load the PropertyCell value at the specified slot. __ sll(at, slot_reg, kPointerSizeLog2); __ Addu(at, at, Operand(context_reg)); - __ lw(result_reg, ContextOperand(at, 0)); + __ lw(result_reg, ContextMemOperand(at, 0)); __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset)); // Check that value is not the_hole. @@ -5214,7 +5171,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) { __ bind(&slow_case); __ SmiTag(slot_reg); __ Push(slot_reg); - __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1); + __ TailCallRuntime(Runtime::kLoadGlobalViaContext); } @@ -5234,14 +5191,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) { // Go up context chain to the script context. for (int i = 0; i < depth(); ++i) { - __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX)); + __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX)); context_reg = cell_reg; } // Load the PropertyCell at the specified slot. __ sll(at, slot_reg, kPointerSizeLog2); __ Addu(at, at, Operand(context_reg)); - __ lw(cell_reg, ContextOperand(at, 0)); + __ lw(cell_reg, ContextMemOperand(at, 0)); // Load PropertyDetails for the cell (actually only the cell_type and kind). __ lw(cell_details_reg, @@ -5328,8 +5285,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) { __ Push(slot_reg, value_reg); __ TailCallRuntime(is_strict(language_mode()) ? Runtime::kStoreGlobalViaContext_Strict - : Runtime::kStoreGlobalViaContext_Sloppy, - 2, 1); + : Runtime::kStoreGlobalViaContext_Sloppy); } @@ -5453,7 +5409,7 @@ static void CallApiFunctionAndReturn( // Re-throw by promoting a scheduled exception. __ bind(&promote_scheduled_exception); - __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); + __ TailCallRuntime(Runtime::kPromoteScheduledException); // HandleScope limit has changed. Delete allocated extensions. 
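The reworked subclassing path above rewrites the stack in place instead of pushing the function and prototype: the constructor lands in the old receiver slot, new.target and the allocation site go on top, and a0 grows by three so Runtime::kNewArray sees all of them. A small simulation, modelling the stack as a vector with index 0 = sp[0] (hypothetical, word-sized slots):

    #include <cstdint>
    #include <vector>

    // Before: [arg0 .. arg_{n-1}, receiver], argc = n.
    // After:  [site, new_target, arg0 .. arg_{n-1}, constructor], argc = n + 3.
    void SubclassingRewrite(std::vector<uintptr_t>& stack, int& argc,
                            uintptr_t constructor, uintptr_t new_target,
                            uintptr_t site) {
      stack[argc] = constructor;                // sw a1, MemOperand(sp + argc * 4)
      argc += 3;                                // li/addu into a0
      stack.insert(stack.begin(), new_target);  // __ Push(a3, a2) pushes a3 ...
      stack.insert(stack.begin(), site);        // ... then a2 on top
    }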
__ bind(&delete_allocated_handles); diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h index 9009ec2692..751095d8d8 100644 --- a/deps/v8/src/mips/code-stubs-mips.h +++ b/deps/v8/src/mips/code-stubs-mips.h @@ -140,9 +140,8 @@ class RecordWriteStub: public PlatformCodeStub { } static void Patch(Code* stub, Mode mode) { - MacroAssembler masm(NULL, - stub->instruction_start(), - stub->instruction_size()); + MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(), + stub->instruction_size(), CodeObjectRequired::kNo); switch (mode) { case STORE_BUFFER_ONLY: DCHECK(GetMode(stub) == INCREMENTAL || @@ -160,8 +159,8 @@ class RecordWriteStub: public PlatformCodeStub { break; } DCHECK(GetMode(stub) == mode); - CpuFeatures::FlushICache(stub->instruction_start(), - 4 * Assembler::kInstrSize); + Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), + 4 * Assembler::kInstrSize); } DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR(); diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 4a1255e1b4..2a144d990c 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -18,23 +18,22 @@ namespace internal { #if defined(USE_SIMULATOR) -byte* fast_exp_mips_machine_code = NULL; -double fast_exp_simulator(double x) { - return Simulator::current(Isolate::Current())->CallFP( - fast_exp_mips_machine_code, x, 0); +byte* fast_exp_mips_machine_code = nullptr; +double fast_exp_simulator(double x, Isolate* isolate) { + return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0); } #endif -UnaryMathFunction CreateExpFunction() { - if (!FLAG_fast_math) return &std::exp; +UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) { size_t actual_size; byte* buffer = static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); - if (buffer == NULL) return &std::exp; + if (buffer == nullptr) return nullptr; ExternalReference::InitializeMathExpData(); - MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size), + CodeObjectRequired::kNo); { DoubleRegister input = f12; @@ -59,11 +58,11 @@ UnaryMathFunction CreateExpFunction() { masm.GetCode(&desc); DCHECK(!RelocInfo::RequiresRelocation(desc)); - CpuFeatures::FlushICache(buffer, actual_size); + Assembler::FlushICache(isolate, buffer, actual_size); base::OS::ProtectCode(buffer, actual_size); #if !defined(USE_SIMULATOR) - return FUNCTION_CAST<UnaryMathFunction>(buffer); + return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer); #else fast_exp_mips_machine_code = buffer; return &fast_exp_simulator; @@ -72,7 +71,8 @@ UnaryMathFunction CreateExpFunction() { #if defined(V8_HOST_ARCH_MIPS) -MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { +MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, + MemCopyUint8Function stub) { #if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \ defined(_MIPS_ARCH_MIPS32RX) return stub; @@ -80,11 +80,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { size_t actual_size; byte* buffer = static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true)); - if (buffer == NULL) return stub; + if (buffer == nullptr) return stub; // This code assumes that cache lines are 32 bytes and if the cache line is // larger it will not work correctly. 
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size), + CodeObjectRequired::kNo); { Label lastb, unaligned, aligned, chkw, @@ -597,23 +598,24 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) { masm.GetCode(&desc); DCHECK(!RelocInfo::RequiresRelocation(desc)); - CpuFeatures::FlushICache(buffer, actual_size); + Assembler::FlushICache(isolate, buffer, actual_size); base::OS::ProtectCode(buffer, actual_size); return FUNCTION_CAST<MemCopyUint8Function>(buffer); #endif } #endif -UnaryMathFunction CreateSqrtFunction() { +UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) { #if defined(USE_SIMULATOR) - return &std::sqrt; + return nullptr; #else size_t actual_size; byte* buffer = static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); - if (buffer == NULL) return &std::sqrt; + if (buffer == nullptr) return nullptr; - MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size), + CodeObjectRequired::kNo); __ MovFromFloatParameter(f12); __ sqrt_d(f0, f12); @@ -624,9 +626,9 @@ UnaryMathFunction CreateSqrtFunction() { masm.GetCode(&desc); DCHECK(!RelocInfo::RequiresRelocation(desc)); - CpuFeatures::FlushICache(buffer, actual_size); + Assembler::FlushICache(isolate, buffer, actual_size); base::OS::ProtectCode(buffer, actual_size); - return FUNCTION_CAST<UnaryMathFunction>(buffer); + return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer); #endif } @@ -1187,15 +1189,17 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; #endif -CodeAgingHelper::CodeAgingHelper() { +CodeAgingHelper::CodeAgingHelper(Isolate* isolate) { + USE(isolate); DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); // Since patcher is a large object, allocate it dynamically when needed, // to avoid overloading the stack in stress conditions. // DONT_FLUSH is used because the CodeAgingHelper is initialized early in // the process, before MIPS simulator ICache is setup. - base::SmartPointer<CodePatcher> patcher(new CodePatcher( - young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize, - CodePatcher::DONT_FLUSH)); + base::SmartPointer<CodePatcher> patcher( + new CodePatcher(isolate, young_sequence_.start(), + young_sequence_.length() / Assembler::kInstrSize, + CodePatcher::DONT_FLUSH)); PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); patcher->masm()->Push(ra, fp, cp, a1); patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); @@ -1239,10 +1243,11 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); if (age == kNoAgeCodeAge) { isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); - CpuFeatures::FlushICache(sequence, young_length); + Assembler::FlushICache(isolate, sequence, young_length); } else { Code* stub = GetCodeAgeStub(isolate, age, parity); - CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); + CodePatcher patcher(isolate, sequence, + young_length / Assembler::kInstrSize); // Mark this code sequence for FindPlatformCodeAgeSequence(). 
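CreateExpFunction, CreateMemCopyUint8Function and CreateSqrtFunction all follow one lifecycle: assemble into a writable buffer, flush the instruction cache (now via Assembler::FlushICache with an explicit isolate), write-protect the page, and cast the buffer to a function pointer. The portable shape of that pattern, sketched with POSIX mmap/mprotect and the GCC/Clang cache-flush builtin rather than V8's helpers:

    #include <cstddef>
    #include <cstring>
    #include <sys/mman.h>

    typedef double (*UnaryFn)(double);

    UnaryFn InstallCode(const void* code, size_t size) {
      void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (buf == MAP_FAILED) return nullptr;
      std::memcpy(buf, code, size);
      __builtin___clear_cache(static_cast<char*>(buf),
                              static_cast<char*>(buf) + size);  // FlushICache
      mprotect(buf, size, PROT_READ | PROT_EXEC);               // ProtectCode
      return reinterpret_cast<UnaryFn>(buf);                    // FUNCTION_CAST
    }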
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP); // Load the stub address to t9 and call it, diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h index 22784fcf53..ad7abb30c5 100644 --- a/deps/v8/src/mips/codegen-mips.h +++ b/deps/v8/src/mips/codegen-mips.h @@ -7,7 +7,7 @@ #define V8_MIPS_CODEGEN_MIPS_H_ -#include "src/ast.h" +#include "src/ast/ast.h" #include "src/macro-assembler.h" namespace v8 { diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h index b0c2ebbdf8..8327501b6f 100644 --- a/deps/v8/src/mips/constants-mips.h +++ b/deps/v8/src/mips/constants-mips.h @@ -143,8 +143,11 @@ const int kInvalidFPURegister = -1; const int kFCSRRegister = 31; const int kInvalidFPUControlRegister = -1; const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1; +const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1 << 31); const uint64_t kFPU64InvalidResult = static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1; +const int64_t kFPU64InvalidResultNegative = + static_cast<int64_t>(static_cast<uint64_t>(1) << 63); // FCSR constants. const uint32_t kFCSRInexactFlagBit = 2; @@ -152,12 +155,14 @@ const uint32_t kFCSRUnderflowFlagBit = 3; const uint32_t kFCSROverflowFlagBit = 4; const uint32_t kFCSRDivideByZeroFlagBit = 5; const uint32_t kFCSRInvalidOpFlagBit = 6; +const uint32_t kFCSRNaN2008FlagBit = 18; const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit; const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit; const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit; const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit; const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit; +const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit; const uint32_t kFCSRFlagMask = kFCSRInexactFlagMask | @@ -256,6 +261,7 @@ const int kRdShift = 11; const int kRdBits = 5; const int kSaShift = 6; const int kSaBits = 5; +const int kLsaSaBits = 2; const int kFunctionShift = 0; const int kFunctionBits = 6; const int kLuiShift = 16; @@ -394,6 +400,7 @@ enum SecondaryField : uint32_t { SRL = ((0U << 3) + 2), SRA = ((0U << 3) + 3), SLLV = ((0U << 3) + 4), + LSA = ((0U << 3) + 5), SRLV = ((0U << 3) + 6), SRAV = ((0U << 3) + 7), @@ -772,7 +779,12 @@ enum FPURoundingMode { kRoundToNearest = RN, kRoundToZero = RZ, kRoundToPlusInf = RP, - kRoundToMinusInf = RM + kRoundToMinusInf = RM, + + mode_round = RN, + mode_ceil = RP, + mode_floor = RM, + mode_trunc = RZ }; const uint32_t kFPURoundingModeMask = 3 << 0; @@ -901,20 +913,21 @@ class Instruction { FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) | FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(SRA) | FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(SRLV) | - FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(MFHI) | - FunctionFieldToBitNumber(MFLO) | FunctionFieldToBitNumber(MULT) | - FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DIV) | - FunctionFieldToBitNumber(DIVU) | FunctionFieldToBitNumber(ADD) | - FunctionFieldToBitNumber(ADDU) | FunctionFieldToBitNumber(SUB) | - FunctionFieldToBitNumber(SUBU) | FunctionFieldToBitNumber(AND) | - FunctionFieldToBitNumber(OR) | FunctionFieldToBitNumber(XOR) | - FunctionFieldToBitNumber(NOR) | FunctionFieldToBitNumber(SLT) | - FunctionFieldToBitNumber(SLTU) | FunctionFieldToBitNumber(TGE) | - FunctionFieldToBitNumber(TGEU) | FunctionFieldToBitNumber(TLT) | - FunctionFieldToBitNumber(TLTU) | FunctionFieldToBitNumber(TEQ) | - 
FunctionFieldToBitNumber(TNE) | FunctionFieldToBitNumber(MOVZ) | - FunctionFieldToBitNumber(MOVN) | FunctionFieldToBitNumber(MOVCI) | - FunctionFieldToBitNumber(SELEQZ_S) | FunctionFieldToBitNumber(SELNEZ_S); + FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(LSA) | + FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) | + FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(MULTU) | + FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DIVU) | + FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(ADDU) | + FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(SUBU) | + FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) | + FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) | + FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) | + FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) | + FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) | + FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) | + FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) | + FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) | + FunctionFieldToBitNumber(SELNEZ_S); // Get the encoding type of the instruction. @@ -948,6 +961,11 @@ class Instruction { return Bits(kSaShift + kSaBits - 1, kSaShift); } + inline int LsaSaValue() const { + DCHECK(InstructionType() == kRegisterType); + return Bits(kSaShift + kLsaSaBits - 1, kSaShift); + } + inline int FunctionValue() const { DCHECK(InstructionType() == kRegisterType || InstructionType() == kImmediateType); diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc index dff1d30402..1199365b7d 100644 --- a/deps/v8/src/mips/cpu-mips.cc +++ b/deps/v8/src/mips/cpu-mips.cc @@ -23,12 +23,12 @@ namespace internal { void CpuFeatures::FlushICache(void* start, size_t size) { +#if !defined(USE_SIMULATOR) // Nothing to do, flushing no instructions. if (size == 0) { return; } -#if !defined (USE_SIMULATOR) #if defined(ANDROID) // Bionic cacheflush can typically run in userland, avoiding kernel call. char *end = reinterpret_cast<char *>(start) + size; @@ -42,14 +42,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) { V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache"); } #endif // ANDROID -#else // USE_SIMULATOR. - // Not generating mips instructions for C-code. This means that we are - // building a mips emulator based target. We should notify the simulator - // that the Icache was flushed. - // None of this code ends up in the snapshot so there are no issues - // around whether or not to generate the code when building snapshots. - Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size); -#endif // USE_SIMULATOR. +#endif // !USE_SIMULATOR. 
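LSA is the r6 shift-and-add that the new MacroAssembler::Lsa helper later in this diff selects when available; its two-bit sa field encodes shift - 1, which is why the decoder prints LsaSaValue() + 1. Its arithmetic effect, as a sketch:

    #include <cstdint>

    // lsa rd, rt, rs, sa  =>  rd = rt + (rs << shift), shift in 1..4.
    uint32_t Lsa(uint32_t rt, uint32_t rs, int sa_field /* 2 bits, 0..3 */) {
      int shift = sa_field + 1;  // the field stores shift - 1
      return rt + (rs << shift);
    }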
} } // namespace internal diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 8ea1b0bb3e..a9e30de44d 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -38,14 +38,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } else { pointer = code->instruction_start(); } - CodePatcher patcher(pointer, 1); + CodePatcher patcher(isolate, pointer, 1); patcher.masm()->break_(0xCC); DeoptimizationInputData* data = DeoptimizationInputData::cast(code->deoptimization_data()); int osr_offset = data->OsrPcOffset()->value(); if (osr_offset > 0) { - CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1); + CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset, + 1); osr_patcher.masm()->break_(0xCC); } } @@ -66,7 +67,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); DCHECK(call_size_in_bytes <= patch_size()); - CodePatcher patcher(call_address, call_size_in_words); + CodePatcher patcher(isolate, call_address, call_size_in_words); patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); DCHECK(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc index 5502f4170c..936514aab2 100644 --- a/deps/v8/src/mips/disasm-mips.cc +++ b/deps/v8/src/mips/disasm-mips.cc @@ -66,6 +66,7 @@ class Decoder { // Printing of common values. void PrintRegister(int reg); void PrintFPURegister(int freg); + void PrintFPUStatusRegister(int freg); void PrintRs(Instruction* instr); void PrintRt(Instruction* instr); void PrintRd(Instruction* instr); @@ -73,6 +74,7 @@ class Decoder { void PrintFt(Instruction* instr); void PrintFd(Instruction* instr); void PrintSa(Instruction* instr); + void PrintLsaSa(Instruction* instr); void PrintSd(Instruction* instr); void PrintSs1(Instruction* instr); void PrintSs2(Instruction* instr); @@ -182,6 +184,17 @@ void Decoder::PrintFPURegister(int freg) { } +void Decoder::PrintFPUStatusRegister(int freg) { + switch (freg) { + case kFCSRRegister: + Print("FCSR"); + break; + default: + Print(converter_.NameOfXMMRegister(freg)); + } +} + + void Decoder::PrintFs(Instruction* instr) { int freg = instr->RsValue(); PrintFPURegister(freg); @@ -207,6 +220,13 @@ void Decoder::PrintSa(Instruction* instr) { } +// Print the integer value of the sa field of a lsa instruction. +void Decoder::PrintLsaSa(Instruction* instr) { + int sa = instr->LsaSaValue() + 1; + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); +} + + // Print the integer value of the rd field, when it is not used as reg. void Decoder::PrintSd(Instruction* instr) { int sd = instr->RdValue(); @@ -476,22 +496,42 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) { // complexity of FormatOption. int Decoder::FormatFPURegister(Instruction* instr, const char* format) { DCHECK(format[0] == 'f'); - if (format[1] == 's') { // 'fs: fs register. - int reg = instr->FsValue(); - PrintFPURegister(reg); - return 2; - } else if (format[1] == 't') { // 'ft: ft register. - int reg = instr->FtValue(); - PrintFPURegister(reg); - return 2; - } else if (format[1] == 'd') { // 'fd: fd register. - int reg = instr->FdValue(); - PrintFPURegister(reg); - return 2; - } else if (format[1] == 'r') { // 'fr: fr register. 
- int reg = instr->FrValue(); - PrintFPURegister(reg); - return 2; + if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) { + if (format[1] == 's') { // 'fs: fs register. + int reg = instr->FsValue(); + PrintFPUStatusRegister(reg); + return 2; + } else if (format[1] == 't') { // 'ft: ft register. + int reg = instr->FtValue(); + PrintFPUStatusRegister(reg); + return 2; + } else if (format[1] == 'd') { // 'fd: fd register. + int reg = instr->FdValue(); + PrintFPUStatusRegister(reg); + return 2; + } else if (format[1] == 'r') { // 'fr: fr register. + int reg = instr->FrValue(); + PrintFPUStatusRegister(reg); + return 2; + } + } else { + if (format[1] == 's') { // 'fs: fs register. + int reg = instr->FsValue(); + PrintFPURegister(reg); + return 2; + } else if (format[1] == 't') { // 'ft: ft register. + int reg = instr->FtValue(); + PrintFPURegister(reg); + return 2; + } else if (format[1] == 'd') { // 'fd: fd register. + int reg = instr->FdValue(); + PrintFPURegister(reg); + return 2; + } else if (format[1] == 'r') { // 'fr: fr register. + int reg = instr->FrValue(); + PrintFPURegister(reg); + return 2; + } } UNREACHABLE(); return -1; @@ -651,11 +691,17 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { } case 's': { // 'sa. switch (format[1]) { - case 'a': { - DCHECK(STRING_STARTS_WITH(format, "sa")); - PrintSa(instr); - return 2; - } + case 'a': + if (format[2] == '2') { + DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2 + PrintLsaSa(instr); + return 3; + } else { + DCHECK(STRING_STARTS_WITH(format, "sa")); + PrintSa(instr); + return 2; + } + break; case 'd': { DCHECK(STRING_STARTS_WITH(format, "sd")); PrintSd(instr); @@ -1026,6 +1072,9 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) { case SRAV: Format(instr, "srav 'rd, 'rt, 'rs"); break; + case LSA: + Format(instr, "lsa 'rd, 'rt, 'rs, 'sa2"); + break; case MFHI: if (instr->Bits(25, 16) == 0) { Format(instr, "mfhi 'rd"); @@ -1498,7 +1547,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) { Format(instr, "lui 'rt, 'imm16x"); } else { if (instr->RsValue() != 0) { - Format(instr, "aui 'rt, 'imm16x"); + Format(instr, "aui 'rt, 'rs, 'imm16x"); } else { Format(instr, "lui 'rt, 'imm16x"); } diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc index 2fe3554b68..3f4fb38028 100644 --- a/deps/v8/src/mips/interface-descriptors-mips.cc +++ b/deps/v8/src/mips/interface-descriptors-mips.cc @@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; } const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; } +const Register RestParamAccessDescriptor::parameter_count() { return a2; } +const Register RestParamAccessDescriptor::parameter_pointer() { return a3; } +const Register RestParamAccessDescriptor::rest_parameter_index() { return a1; } + + const Register ApiGetterDescriptor::function_address() { return a2; } @@ -125,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific( } +void FastCloneRegExpDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + Register registers[] = {a3, a2, a1, a0}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + + void FastCloneShallowArrayDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {a3, a2, a1}; @@ -187,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific( // a1 : the function to call // a2 : feedback vector // a3 : slot in 
feedback vector (Smi, for RecordCallTarget) - // t0 : original constructor (for IsSuperConstructorCall) + // t0 : new target (for IsSuperConstructorCall) // TODO(turbofan): So far we don't gather type feedback and hence skip the // slot parameter, but ArrayConstructStub needs the vector to be undefined. Register registers[] = {a0, a1, t0, a2}; @@ -204,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific( } +void ConstructStubDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // a1: target + // a3: new target + // a0: number of arguments + // a2: allocation site or undefined + Register registers[] = {a1, a3, a0, a2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + + +void ConstructTrampolineDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // a1: target + // a3: new target + // a0: number of arguments + Register registers[] = {a1, a3, a0}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + + void RegExpConstructResultDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {a2, a1, a0}; @@ -342,6 +375,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { a1, // JSFunction + a3, // the new target a0, // actual number of arguments a2, // expected number of arguments }; @@ -374,27 +408,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific( } -void MathRoundVariantCallFromUnoptimizedCodeDescriptor:: - InitializePlatformSpecific(CallInterfaceDescriptorData* data) { - Register registers[] = { - a1, // math rounding function - a3, // vector slot id - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - - -void MathRoundVariantCallFromOptimizedCodeDescriptor:: - InitializePlatformSpecific(CallInterfaceDescriptorData* data) { - Register registers[] = { - a1, // math rounding function - a3, // vector slot id - a2, // type vector - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - - void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { @@ -410,7 +423,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = { a0, // argument count (not including receiver) - a3, // original constructor + a3, // new target a1, // constructor to call a2 // address of the first argument }; diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 4a5a386fa0..3c866ac453 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -19,12 +19,13 @@ namespace v8 { namespace internal { -MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) +MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size, + CodeObjectRequired create_code_object) : Assembler(arg_isolate, buffer, size), generating_stub_(false), has_frame_(false), has_double_zero_reg_set_(false) { - if (isolate() != NULL) { + if (create_code_object == CodeObjectRequired::kYes) { code_object_ = Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); } @@ -433,10 +434,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, #endif // Load the native context of the current context. 
- int offset = - Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; - lw(scratch, FieldMemOperand(scratch, offset)); - lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset)); + lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX)); // Check the context is a native context. if (emit_debug_code()) { @@ -1054,6 +1052,19 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { } +void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, + Register scratch) { + if (IsMipsArchVariant(kMips32r6) && sa <= 4) { + lsa(rd, rt, rs, sa); + } else { + Register tmp = rd.is(rt) ? scratch : rd; + DCHECK(!tmp.is(rt)); + sll(tmp, rs, sa); + Addu(rd, rt, tmp); + } +} + + // ------------Pseudo-instructions------------- void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { @@ -1267,50 +1278,40 @@ void MacroAssembler::Ins(Register rt, } -void MacroAssembler::Cvt_d_uw(FPURegister fd, - FPURegister fs, +void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch) { - // Move the data from fs to t8. - mfc1(t8, fs); - Cvt_d_uw(fd, t8, scratch); -} - - -void MacroAssembler::Cvt_d_uw(FPURegister fd, - Register rs, - FPURegister scratch) { - // Convert rs to a FP value in fd (and fd + 1). - // We do this by converting rs minus the MSB to avoid sign conversion, - // then adding 2^31 to the result (if needed). + // In FP64Mode we do conversion from long. + if (IsFp64Mode()) { + mtc1(rs, scratch); + Mthc1(zero_reg, scratch); + cvt_d_l(fd, scratch); + } else { + // Convert rs to a FP value in fd. + DCHECK(!fd.is(scratch)); + DCHECK(!rs.is(at)); - DCHECK(!fd.is(scratch)); - DCHECK(!rs.is(t9)); - DCHECK(!rs.is(at)); + Label msb_clear, conversion_done; + // For a value which is < 2^31, regard it as a signed positive word. + Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT); + mtc1(rs, fd); - // Save rs's MSB to t9. - Ext(t9, rs, 31, 1); - // Remove rs's MSB. - Ext(at, rs, 0, 31); - // Move the result to fd. - mtc1(at, fd); + li(at, 0x41F00000); // FP value: 2^32. - // Convert fd to a real FP value. - cvt_d_w(fd, fd); + // For unsigned inputs > 2^31, we convert to double as a signed int32, + // then add 2^32 to move it back to unsigned value in range 2^31..2^32-1. + mtc1(zero_reg, scratch); + Mthc1(at, scratch); - Label conversion_done; + cvt_d_w(fd, fd); - // If rs's MSB was 0, it's done. - // Otherwise we need to add that to the FP register. - Branch(&conversion_done, eq, t9, Operand(zero_reg)); + Branch(USE_DELAY_SLOT, &conversion_done); + add_d(fd, fd, scratch); - // Load 2^31 into f20 as its float representation. - li(at, 0x41E00000); - mtc1(zero_reg, scratch); - Mthc1(at, scratch); - // Add it to fd.
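What the new Cvt_d_uw computes, spelled out in plain C++: the FP32 fallback converts through the signed path and then biases by 2^32 when the sign bit was set (0x41F00000 is exactly the high word of the double 2^32). A behavioural sketch:

    #include <cstdint>

    double CvtDUw(uint32_t x) {
      int32_t as_signed = static_cast<int32_t>(x);
      double d = static_cast<double>(as_signed);  // cvt_d_w
      if (as_signed < 0) d += 4294967296.0;       // add_d fd, fd, 2^32
      return d;                                   // exact for every uint32 input
    }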
- add_d(fd, fd, scratch); + bind(&msb_clear); + cvt_d_w(fd, fd); - bind(&conversion_done); + bind(&conversion_done); + } } @@ -1438,13 +1439,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, if (!IsMipsArchVariant(kMips32r6)) { if (long_branch) { Label skip; - c(UN, D, cmp1, cmp2); + c(UN, sizeField, cmp1, cmp2); bc1f(&skip); nop(); BranchLong(nan, bd); bind(&skip); } else { - c(UN, D, cmp1, cmp2); + c(UN, sizeField, cmp1, cmp2); bc1t(nan); if (bd == PROTECT) { nop(); @@ -1456,13 +1457,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg)); if (long_branch) { Label skip; - cmp(UN, L, kDoubleCompareReg, cmp1, cmp2); + cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); bc1eqz(&skip, kDoubleCompareReg); nop(); BranchLong(nan, bd); bind(&skip); } else { - cmp(UN, L, kDoubleCompareReg, cmp1, cmp2); + cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); bc1nez(nan, kDoubleCompareReg); if (bd == PROTECT) { nop(); @@ -3270,12 +3271,7 @@ void MacroAssembler::Allocate(int object_size, return; } - DCHECK(!result.is(scratch1)); - DCHECK(!result.is(scratch2)); - DCHECK(!scratch1.is(scratch2)); - DCHECK(!scratch1.is(t9)); - DCHECK(!scratch2.is(t9)); - DCHECK(!result.is(t9)); + DCHECK(!AreAliased(result, scratch1, scratch2, t9)); // Make object size into bytes. if ((flags & SIZE_IN_WORDS) != 0) { @@ -3291,54 +3287,52 @@ void MacroAssembler::Allocate(int object_size, ExternalReference allocation_limit = AllocationUtils::GetAllocationLimitReference(isolate(), flags); - intptr_t top = - reinterpret_cast<intptr_t>(allocation_top.address()); - intptr_t limit = - reinterpret_cast<intptr_t>(allocation_limit.address()); + intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address()); + intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); DCHECK((limit - top) == kPointerSize); - // Set up allocation top address and object size registers. - Register topaddr = scratch1; - li(topaddr, Operand(allocation_top)); - + // Set up allocation top address and allocation limit registers. + Register top_address = scratch1; // This code stores a temporary value in t9. + Register alloc_limit = t9; + Register result_end = scratch2; + li(top_address, Operand(allocation_top)); + if ((flags & RESULT_CONTAINS_TOP) == 0) { - // Load allocation top into result and allocation limit into t9. - lw(result, MemOperand(topaddr)); - lw(t9, MemOperand(topaddr, kPointerSize)); + // Load allocation top into result and allocation limit into alloc_limit. + lw(result, MemOperand(top_address)); + lw(alloc_limit, MemOperand(top_address, kPointerSize)); } else { if (emit_debug_code()) { - // Assert that result actually contains top on entry. t9 is used - // immediately below so this use of t9 does not cause difference with - // respect to register content between debug and release mode. - lw(t9, MemOperand(topaddr)); - Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); + // Assert that result actually contains top on entry. + lw(alloc_limit, MemOperand(top_address)); + Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit)); } - // Load allocation limit into t9. Result already contains allocation top. - lw(t9, MemOperand(topaddr, limit - top)); + // Load allocation limit. Result already contains allocation top. + lw(alloc_limit, MemOperand(top_address, limit - top)); } if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. 
Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. DCHECK(kPointerAlignment * 2 == kDoubleAlignment); - And(scratch2, result, Operand(kDoubleAlignmentMask)); + And(result_end, result, Operand(kDoubleAlignmentMask)); Label aligned; - Branch(&aligned, eq, scratch2, Operand(zero_reg)); + Branch(&aligned, eq, result_end, Operand(zero_reg)); if ((flags & PRETENURE) != 0) { - Branch(gc_required, Ugreater_equal, result, Operand(t9)); + Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit)); } - li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); - sw(scratch2, MemOperand(result)); + li(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); + sw(result_end, MemOperand(result)); Addu(result, result, Operand(kDoubleSize / 2)); bind(&aligned); } // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. - Addu(scratch2, result, Operand(object_size)); - Branch(gc_required, Ugreater, scratch2, Operand(t9)); - sw(scratch2, MemOperand(topaddr)); + Addu(result_end, result, Operand(object_size)); + Branch(gc_required, Ugreater, result_end, Operand(alloc_limit)); + sw(result_end, MemOperand(top_address)); // Tag object if requested. if ((flags & TAG_OBJECT) != 0) { @@ -3347,28 +3341,25 @@ void MacroAssembler::Allocate(int object_size, } -void MacroAssembler::Allocate(Register object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::Allocate(Register object_size, Register result, + Register result_end, Register scratch, + Label* gc_required, AllocationFlags flags) { if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. li(result, 0x7091); - li(scratch1, 0x7191); - li(scratch2, 0x7291); + li(scratch, 0x7191); + li(result_end, 0x7291); } jmp(gc_required); return; } - DCHECK(!result.is(scratch1)); - DCHECK(!result.is(scratch2)); - DCHECK(!scratch1.is(scratch2)); - DCHECK(!object_size.is(t9)); - DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); + // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag + // is not specified. Other registers must not overlap. + DCHECK(!AreAliased(object_size, result, scratch, t9)); + DCHECK(!AreAliased(result_end, result, scratch, t9)); + DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end)); // Check relative positions of allocation top and limit addresses. // ARM adds additional checks to make sure the ldm instruction can be @@ -3377,45 +3368,42 @@ void MacroAssembler::Allocate(Register object_size, AllocationUtils::GetAllocationTopReference(isolate(), flags); ExternalReference allocation_limit = AllocationUtils::GetAllocationLimitReference(isolate(), flags); - intptr_t top = - reinterpret_cast<intptr_t>(allocation_top.address()); - intptr_t limit = - reinterpret_cast<intptr_t>(allocation_limit.address()); + intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address()); + intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address()); DCHECK((limit - top) == kPointerSize); - // Set up allocation top address and object size registers. - Register topaddr = scratch1; - li(topaddr, Operand(allocation_top)); - + // Set up allocation top address and allocation limit registers. + Register top_address = scratch; // This code stores a temporary value in t9. 
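Both Allocate overloads are bump-pointer allocation against adjacent top/limit words, with an optional one-word filler to reach double alignment; the renames (topaddr to top_address, scratch2 to result_end, t9 to alloc_limit) only make those roles explicit. The fast path, sketched:

    #include <cstddef>
    #include <cstdint>

    struct NewSpace { uintptr_t top, limit; };  // adjacent words in V8

    const uintptr_t kDoubleAlignmentMask = 7;   // 2 * kPointerSize - 1 on MIPS32
    const uintptr_t kHeapObjectTag = 1;

    // Returns a tagged pointer, or 0 when the caller must go to the GC.
    uintptr_t Allocate(NewSpace* space, size_t size, bool double_align) {
      uintptr_t result = space->top;
      if (double_align && (result & kDoubleAlignmentMask) != 0) {
        // The real code stores the one-pointer filler map at 'result' here.
        result += 4;                            // kDoubleSize / 2
      }
      uintptr_t result_end = result + size;
      if (result_end > space->limit) return 0;  // gc_required
      space->top = result_end;
      return result | kHeapObjectTag;           // TAG_OBJECT
    }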
+ Register alloc_limit = t9; + li(top_address, Operand(allocation_top)); + if ((flags & RESULT_CONTAINS_TOP) == 0) { - // Load allocation top into result and allocation limit into t9. - lw(result, MemOperand(topaddr)); - lw(t9, MemOperand(topaddr, kPointerSize)); + // Load allocation top into result and allocation limit into alloc_limit. + lw(result, MemOperand(top_address)); + lw(alloc_limit, MemOperand(top_address, kPointerSize)); } else { if (emit_debug_code()) { - // Assert that result actually contains top on entry. t9 is used - // immediately below so this use of t9 does not cause difference with - // respect to register content between debug and release mode. - lw(t9, MemOperand(topaddr)); - Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); + // Assert that result actually contains top on entry. + lw(alloc_limit, MemOperand(top_address)); + Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit)); } - // Load allocation limit into t9. Result already contains allocation top. - lw(t9, MemOperand(topaddr, limit - top)); + // Load allocation limit. Result already contains allocation top. + lw(alloc_limit, MemOperand(top_address, limit - top)); } if ((flags & DOUBLE_ALIGNMENT) != 0) { // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. DCHECK(kPointerAlignment * 2 == kDoubleAlignment); - And(scratch2, result, Operand(kDoubleAlignmentMask)); + And(result_end, result, Operand(kDoubleAlignmentMask)); Label aligned; - Branch(&aligned, eq, scratch2, Operand(zero_reg)); + Branch(&aligned, eq, result_end, Operand(zero_reg)); if ((flags & PRETENURE) != 0) { - Branch(gc_required, Ugreater_equal, result, Operand(t9)); + Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit)); } - li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); - sw(scratch2, MemOperand(result)); + li(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); + sw(result_end, MemOperand(result)); Addu(result, result, Operand(kDoubleSize / 2)); bind(&aligned); } @@ -3424,19 +3412,19 @@ void MacroAssembler::Allocate(Register object_size, // to calculate the new top. Object size may be in words so a shift is // required to get the number of bytes. if ((flags & SIZE_IN_WORDS) != 0) { - sll(scratch2, object_size, kPointerSizeLog2); - Addu(scratch2, result, scratch2); + sll(result_end, object_size, kPointerSizeLog2); + Addu(result_end, result, result_end); } else { - Addu(scratch2, result, Operand(object_size)); + Addu(result_end, result, Operand(object_size)); } - Branch(gc_required, Ugreater, scratch2, Operand(t9)); + Branch(gc_required, Ugreater, result_end, Operand(alloc_limit)); // Update allocation top. result temporarily holds the new top. if (emit_debug_code()) { - And(t9, scratch2, Operand(kObjectAlignmentMask)); - Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg)); + And(alloc_limit, result_end, Operand(kObjectAlignmentMask)); + Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg)); } - sw(scratch2, MemOperand(topaddr)); + sw(result_end, MemOperand(top_address)); // Tag object if requested. if ((flags & TAG_OBJECT) != 0) { @@ -3612,29 +3600,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result, } -// Copies a fixed number of fields of heap objects from src to dst. 
-void MacroAssembler::CopyFields(Register dst, - Register src, - RegList temps, - int field_count) { - DCHECK((temps & dst.bit()) == 0); - DCHECK((temps & src.bit()) == 0); - // Primitive implementation using only one temporary register. +void MacroAssembler::AllocateJSValue(Register result, Register constructor, + Register value, Register scratch1, + Register scratch2, Label* gc_required) { + DCHECK(!result.is(constructor)); + DCHECK(!result.is(scratch1)); + DCHECK(!result.is(scratch2)); + DCHECK(!result.is(value)); - Register tmp = no_reg; - // Find a temp register in temps list. - for (int i = 0; i < kNumRegisters; i++) { - if ((temps & (1 << i)) != 0) { - tmp.reg_code = i; - break; - } - } - DCHECK(!tmp.is(no_reg)); + // Allocate JSValue in new space. + Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); - for (int i = 0; i < field_count; i++) { - lw(tmp, FieldMemOperand(src, i * kPointerSize)); - sw(tmp, FieldMemOperand(dst, i * kPointerSize)); - } + // Initialize the JSValue. + LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); + sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); + LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); + sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); + sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); + sw(value, FieldMemOperand(result, JSValue::kValueOffset)); + STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); } @@ -3706,16 +3690,16 @@ void MacroAssembler::CopyBytes(Register src, } -void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, - Register end_offset, +void MacroAssembler::InitializeFieldsWithFiller(Register current_address, + Register end_address, Register filler) { Label loop, entry; Branch(&entry); bind(&loop); - sw(filler, MemOperand(start_offset)); - Addu(start_offset, start_offset, kPointerSize); + sw(filler, MemOperand(current_address)); + Addu(current_address, current_address, kPointerSize); bind(&entry); - Branch(&loop, ult, start_offset, Operand(end_offset)); + Branch(&loop, ult, current_address, Operand(end_address)); } @@ -3766,6 +3750,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register scratch3, Label* fail, int elements_offset) { + DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2, + scratch3)); Label smi_value, maybe_nan, have_double_value, is_nan, done; Register mantissa_reg = scratch2; Register exponent_reg = scratch3; @@ -3820,7 +3806,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Addu(scratch1, scratch1, scratch2); // scratch1 is now effective address of the double element - Register untagged_value = elements_reg; + Register untagged_value = scratch2; SmiUntag(untagged_value, value_reg); mtc1(untagged_value, f2); cvt_d_w(f0, f2); @@ -3985,8 +3971,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1, void MacroAssembler::InvokePrologue(const ParameterCount& expected, const ParameterCount& actual, - Handle<Code> code_constant, - Register code_reg, Label* done, bool* definitely_mismatches, InvokeFlag flag, @@ -4006,7 +3990,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, // passed in registers. 
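InitializeFieldsWithFiller, renamed in this hunk to take |current_address| and |end_address|, is a plain store loop. An equivalent C++ sketch:

  // On exit |current_address| equals |end_address|; the branch to &entry
  // makes this a test-first loop, so an empty range stores nothing.
  while (current_address < end_address) {  // ult: unsigned compare
    *reinterpret_cast<Object**>(current_address) = filler;
    current_address += kPointerSize;
  }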
DCHECK(actual.is_immediate() || actual.reg().is(a0)); DCHECK(expected.is_immediate() || expected.reg().is(a2)); - DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); if (expected.is_immediate()) { DCHECK(actual.is_immediate()); @@ -4034,11 +4017,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } if (!definitely_matches) { - if (!code_constant.is_null()) { - li(a3, Operand(code_constant)); - addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); - } - Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); if (flag == CALL_FUNCTION) { @@ -4056,21 +4034,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } -void MacroAssembler::InvokeCode(Register code, - const ParameterCount& expected, - const ParameterCount& actual, - InvokeFlag flag, - const CallWrapper& call_wrapper) { +void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + Label skip_flooding; + ExternalReference step_in_enabled = + ExternalReference::debug_step_in_enabled_address(isolate()); + li(t0, Operand(step_in_enabled)); + lb(t0, MemOperand(t0)); + Branch(&skip_flooding, eq, t0, Operand(zero_reg)); + { + FrameScope frame(this, + has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); + } + } + bind(&skip_flooding); +} + + +void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper) { // You can't call a function without a valid frame. DCHECK(flag == JUMP_FUNCTION || has_frame()); + DCHECK(function.is(a1)); + DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3)); - Label done; + if (call_wrapper.NeedsDebugStepCheck()) { + FloodFunctionIfStepping(function, new_target, expected, actual); + } + + // Clear the new.target register if not given. + if (!new_target.is_valid()) { + LoadRoot(a3, Heap::kUndefinedValueRootIndex); + } + Label done; bool definitely_mismatches = false; - InvokePrologue(expected, actual, Handle<Code>::null(), code, - &done, &definitely_mismatches, flag, + InvokePrologue(expected, actual, &done, &definitely_mismatches, flag, call_wrapper); if (!definitely_mismatches) { + // We call indirectly through the code field in the function to + // allow recompilation to take effect without changing any of the + // call sites. + Register code = t0; + lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(code)); Call(code); @@ -4087,6 +4122,7 @@ void MacroAssembler::InvokeCode(Register code, void MacroAssembler::InvokeFunction(Register function, + Register new_target, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper) { @@ -4096,18 +4132,18 @@ void MacroAssembler::InvokeFunction(Register function, // Contract with called JS functions requires that function is passed in a1. 
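FloodFunctionIfStepping follows a strict save/restore discipline: register-based counts are smi-tagged so they sit on the stack as valid tagged values, and the function is pushed twice because the runtime call consumes one copy. Schematically (pseudocode, not the emitted sequence):

  if (*debug_step_in_enabled != 0) {
    push(SmiTag(expected)); push(SmiTag(actual));  // only the register cases
    push(new_target);                              // only if valid
    push(fun); push(fun);                          // runtime arg + saved copy
    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);  // pops one fun
    pop(fun); pop(new_target); pop(actual); pop(expected);  // reverse order,
    // smi-untagging the counts as they come back off the stack
  }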
DCHECK(function.is(a1)); Register expected_reg = a2; - Register code_reg = a3; + Register temp_reg = t0; - lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); lw(expected_reg, - FieldMemOperand(code_reg, - SharedFunctionInfo::kFormalParameterCountOffset)); + FieldMemOperand(temp_reg, + SharedFunctionInfo::kFormalParameterCountOffset)); sra(expected_reg, expected_reg, kSmiTagSize); - lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); ParameterCount expected(expected_reg); - InvokeCode(code_reg, expected, actual, flag, call_wrapper); + InvokeFunctionCode(function, new_target, expected, actual, flag, + call_wrapper); } @@ -4125,11 +4161,7 @@ void MacroAssembler::InvokeFunction(Register function, // Get the function and setup the context. lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - // We call indirectly through the code field in the function to - // allow recompilation to take effect without changing any of the - // call sites. - lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); - InvokeCode(a3, expected, actual, flag, call_wrapper); + InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper); } @@ -4301,108 +4333,161 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi, } -void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left, - const Operand& right, - Register overflow_dst, - Register scratch) { +static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst, + Label* overflow_label, + Label* no_overflow_label) { + DCHECK(overflow_label || no_overflow_label); + if (!overflow_label) { + DCHECK(no_overflow_label); + masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg)); + } else { + masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg)); + if (no_overflow_label) masm->Branch(no_overflow_label); + } +} + + +void MacroAssembler::AddBranchOvf(Register dst, Register left, + const Operand& right, Label* overflow_label, + Label* no_overflow_label, Register scratch) { if (right.is_reg()) { - AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch); + AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label, + scratch); } else { - if (dst.is(left)) { - mov(scratch, left); // Preserve left. - Addu(dst, left, right.immediate()); // Left is overwritten. - xor_(scratch, dst, scratch); // Original left. - // Load right since xori takes uint16 as immediate. - Addu(t9, zero_reg, right); - xor_(overflow_dst, dst, t9); - and_(overflow_dst, overflow_dst, scratch); + if (IsMipsArchVariant(kMips32r6)) { + Register right_reg = t9; + DCHECK(!left.is(right_reg)); + li(right_reg, Operand(right)); + AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label); } else { - Addu(dst, left, right.immediate()); - xor_(overflow_dst, dst, left); - // Load right since xori takes uint16 as immediate. - Addu(t9, zero_reg, right); - xor_(scratch, dst, t9); - and_(overflow_dst, scratch, overflow_dst); + Register overflow_dst = t9; + DCHECK(!dst.is(scratch)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!scratch.is(overflow_dst)); + DCHECK(!left.is(overflow_dst)); + if (dst.is(left)) { + mov(scratch, left); // Preserve left. + Addu(dst, left, right.immediate()); // Left is overwritten. + xor_(scratch, dst, scratch); // Original left. + // Load right since xori takes uint16 as immediate. 
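The pre-r6 AddBranchOvf path keeps the classic XOR test: signed addition overflows exactly when both operands share a sign and the result does not, which is what the emitted xor_/and_ sequence accumulates into t9 for BranchOvfHelper (negative means overflow). A self-contained C++ version of the predicate:

  #include <cstdint>

  bool AddOverflows(int32_t left, int32_t right, int32_t* dst) {
    // Unsigned arithmetic sidesteps C++ undefined behavior on overflow.
    *dst = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                static_cast<uint32_t>(right));
    // Sign bit set iff the result disagrees in sign with both inputs.
    return ((*dst ^ left) & (*dst ^ right)) < 0;
  }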
+ Addu(overflow_dst, zero_reg, right); + xor_(overflow_dst, dst, overflow_dst); + and_(overflow_dst, overflow_dst, scratch); + } else { + Addu(dst, left, right.immediate()); + xor_(overflow_dst, dst, left); + // Load right since xori takes uint16 as immediate. + Addu(scratch, zero_reg, right); + xor_(scratch, dst, scratch); + and_(overflow_dst, scratch, overflow_dst); + } + BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label); } } } -void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left, - Register right, - Register overflow_dst, - Register scratch) { - DCHECK(!dst.is(overflow_dst)); - DCHECK(!dst.is(scratch)); - DCHECK(!overflow_dst.is(scratch)); - DCHECK(!overflow_dst.is(left)); - DCHECK(!overflow_dst.is(right)); - - if (left.is(right) && dst.is(left)) { - DCHECK(!dst.is(t9)); - DCHECK(!scratch.is(t9)); - DCHECK(!left.is(t9)); - DCHECK(!right.is(t9)); - DCHECK(!overflow_dst.is(t9)); - mov(t9, right); - right = t9; - } - - if (dst.is(left)) { - mov(scratch, left); // Preserve left. - addu(dst, left, right); // Left is overwritten. - xor_(scratch, dst, scratch); // Original left. - xor_(overflow_dst, dst, right); - and_(overflow_dst, overflow_dst, scratch); - } else if (dst.is(right)) { - mov(scratch, right); // Preserve right. - addu(dst, left, right); // Right is overwritten. - xor_(scratch, dst, scratch); // Original right. - xor_(overflow_dst, dst, left); - and_(overflow_dst, overflow_dst, scratch); +void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right, + Label* overflow_label, + Label* no_overflow_label, Register scratch) { + if (IsMipsArchVariant(kMips32r6)) { + if (!overflow_label) { + DCHECK(no_overflow_label); + DCHECK(!dst.is(scratch)); + Register left_reg = left.is(dst) ? scratch : left; + Register right_reg = right.is(dst) ? t9 : right; + DCHECK(!dst.is(left_reg)); + DCHECK(!dst.is(right_reg)); + Move(left_reg, left); + Move(right_reg, right); + addu(dst, left, right); + bnvc(left_reg, right_reg, no_overflow_label); + } else { + bovc(left, right, overflow_label); + addu(dst, left, right); + if (no_overflow_label) bc(no_overflow_label); + } } else { - addu(dst, left, right); - xor_(overflow_dst, dst, left); - xor_(scratch, dst, right); - and_(overflow_dst, scratch, overflow_dst); + Register overflow_dst = t9; + DCHECK(!dst.is(scratch)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!scratch.is(overflow_dst)); + DCHECK(!left.is(overflow_dst)); + DCHECK(!right.is(overflow_dst)); + DCHECK(!left.is(scratch)); + DCHECK(!right.is(scratch)); + + if (left.is(right) && dst.is(left)) { + mov(overflow_dst, right); + right = overflow_dst; + } + + if (dst.is(left)) { + mov(scratch, left); // Preserve left. + addu(dst, left, right); // Left is overwritten. + xor_(scratch, dst, scratch); // Original left. + xor_(overflow_dst, dst, right); + and_(overflow_dst, overflow_dst, scratch); + } else if (dst.is(right)) { + mov(scratch, right); // Preserve right. + addu(dst, left, right); // Right is overwritten. + xor_(scratch, dst, scratch); // Original right. 
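On MIPS32r6 the macro leans on the compact branch-on-overflow instructions instead of the XOR sequence. The shape of the emitted code, per the register variant in this hunk:

  bovc  rs, rt, overflow_label     # taken iff rs + rt overflows int32
  addu  dst, rs, rt                # sum is still defined modulo 2^32
  bc    no_overflow_label          # only when a fall-through label was given

The bnvc form covers the case where only a no-overflow label exists, with Move() shuffles into scratch registers first so dst can safely alias an input.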
+ xor_(overflow_dst, dst, left); + and_(overflow_dst, overflow_dst, scratch); + } else { + addu(dst, left, right); + xor_(overflow_dst, dst, left); + xor_(scratch, dst, right); + and_(overflow_dst, scratch, overflow_dst); + } + BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label); } } -void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, - const Operand& right, - Register overflow_dst, - Register scratch) { +void MacroAssembler::SubBranchOvf(Register dst, Register left, + const Operand& right, Label* overflow_label, + Label* no_overflow_label, Register scratch) { + DCHECK(overflow_label || no_overflow_label); if (right.is_reg()) { - SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch); + SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label, + scratch); } else { + Register overflow_dst = t9; + DCHECK(!dst.is(scratch)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!scratch.is(overflow_dst)); + DCHECK(!left.is(overflow_dst)); + DCHECK(!left.is(scratch)); if (dst.is(left)) { mov(scratch, left); // Preserve left. - Subu(dst, left, right); // Left is overwritten. - xor_(overflow_dst, dst, scratch); // scratch is original left. + Subu(dst, left, right.immediate()); // Left is overwritten. // Load right since xori takes uint16 as immediate. - Addu(t9, zero_reg, right); - xor_(scratch, scratch, t9); // scratch is original left. + Addu(overflow_dst, zero_reg, right); + xor_(overflow_dst, scratch, overflow_dst); // scratch is original left. + xor_(scratch, dst, scratch); // scratch is original left. and_(overflow_dst, scratch, overflow_dst); } else { Subu(dst, left, right); xor_(overflow_dst, dst, left); // Load right since xori takes uint16 as immediate. - Addu(t9, zero_reg, right); - xor_(scratch, left, t9); + Addu(scratch, zero_reg, right); + xor_(scratch, left, scratch); and_(overflow_dst, scratch, overflow_dst); } + BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label); } } -void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, - Register right, - Register overflow_dst, - Register scratch) { - DCHECK(!dst.is(overflow_dst)); +void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right, + Label* overflow_label, + Label* no_overflow_label, Register scratch) { + DCHECK(overflow_label || no_overflow_label); + Register overflow_dst = t9; DCHECK(!dst.is(scratch)); - DCHECK(!overflow_dst.is(scratch)); + DCHECK(!dst.is(overflow_dst)); + DCHECK(!scratch.is(overflow_dst)); DCHECK(!overflow_dst.is(left)); DCHECK(!overflow_dst.is(right)); DCHECK(!scratch.is(left)); @@ -4412,8 +4497,9 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, // left == right, let's not make that restriction here. if (left.is(right)) { mov(dst, zero_reg); - mov(overflow_dst, zero_reg); - return; + if (no_overflow_label) { + Branch(no_overflow_label); + } } if (dst.is(left)) { @@ -4434,6 +4520,7 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, xor_(scratch, left, right); and_(overflow_dst, scratch, overflow_dst); } + BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label); } @@ -4469,24 +4556,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext, } -void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, - int num_arguments, - int result_size) { - // TODO(1236192): Most runtime routines don't need the number of - // arguments passed in because it is constant. 
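SubBranchOvf applies the subtraction analogue: overflow requires the operands to differ in sign and the result to differ in sign from the minuend. As a standalone predicate matching the xor_/and_ pattern here:

  #include <cstdint>

  bool SubOverflows(int32_t left, int32_t right, int32_t* dst) {
    *dst = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                static_cast<uint32_t>(right));
    return ((left ^ right) & (*dst ^ left)) < 0;
  }

Call sites that used to pair SubuAndCheckForOverflow with a separate BranchOnOverflow now make a single SubBranchOvf call with the label passed inline.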
At some point we - // should remove this need and make the runtime routine entry code - // smarter. - PrepareCEntryArgs(num_arguments); - JumpToExternalReference(ext); -} - - -void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, - int num_arguments, - int result_size) { - TailCallExternalReference(ExternalReference(fid, isolate()), - num_arguments, - result_size); +void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { + const Runtime::Function* function = Runtime::FunctionForId(fid); + DCHECK_EQ(1, function->result_size); + if (function->nargs >= 0) { + PrepareCEntryArgs(function->nargs); + } + JumpToExternalReference(ExternalReference(fid, isolate())); } @@ -4508,34 +4584,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag, // You can't call a builtin without a valid frame. DCHECK(flag == JUMP_FUNCTION || has_frame()); - GetBuiltinEntry(t9, native_context_index); - if (flag == CALL_FUNCTION) { - call_wrapper.BeforeCall(CallSize(t9)); - Call(t9); - call_wrapper.AfterCall(); - } else { - DCHECK(flag == JUMP_FUNCTION); - Jump(t9); - } -} - - -void MacroAssembler::GetBuiltinFunction(Register target, - int native_context_index) { - // Load the builtins object into target register. - lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - lw(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset)); - // Load the JavaScript builtin function from the builtins object. - lw(target, ContextOperand(target, native_context_index)); -} - - -void MacroAssembler::GetBuiltinEntry(Register target, - int native_context_index) { - DCHECK(!target.is(a1)); - GetBuiltinFunction(a1, native_context_index); - // Load the code entry point from the builtins object. - lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + // Fake a parameter count to avoid emitting code to do the check. + ParameterCount expected(0); + LoadNativeContextSlot(native_context_index, a1); + InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper); } @@ -4672,47 +4724,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) { } -void MacroAssembler::LoadGlobalProxy(Register dst) { - lw(dst, GlobalObjectOperand()); - lw(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset)); -} - - void MacroAssembler::LoadTransitionedArrayMapConditional( ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label* no_map_match) { - // Load the global or builtins object from the current context. - lw(scratch, - MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset)); + DCHECK(IsFastElementsKind(expected_kind)); + DCHECK(IsFastElementsKind(transitioned_kind)); // Check that the function's map is the same as the expected cached map. - lw(scratch, - MemOperand(scratch, - Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); - size_t offset = expected_kind * kPointerSize + - FixedArrayBase::kHeaderSize; - lw(at, FieldMemOperand(scratch, offset)); + lw(scratch, NativeContextMemOperand()); + lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind))); Branch(no_map_match, ne, map_in_out, Operand(at)); // Use the transitioned cached map. 
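TailCallRuntime now reads the argument count out of the runtime function table instead of taking it (plus an unused result size) at every call site. A hypothetical migration, with kFoo standing in for any single-result runtime id:

  // Before: TailCallRuntime(Runtime::kFoo, 2, 1);
  // After:  TailCallRuntime(Runtime::kFoo);

The nargs >= 0 guard suggests a negative count marks variable-arity runtime functions, for which a0 is left as the caller prepared it. InvokeBuiltin moves in the same direction: it fakes an expected count of zero and funnels through InvokeFunctionCode rather than jumping to a raw code entry.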
- offset = transitioned_kind * kPointerSize + - FixedArrayBase::kHeaderSize; - lw(map_in_out, FieldMemOperand(scratch, offset)); + lw(map_in_out, + ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind))); } -void MacroAssembler::LoadGlobalFunction(int index, Register function) { - // Load the global or builtins object from the current context. - lw(function, - MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); - // Load the native context from the global or builtins object. - lw(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset)); - // Load the function from the native context. - lw(function, MemOperand(function, Context::SlotOffset(index))); +void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { + lw(dst, NativeContextMemOperand()); + lw(dst, ContextMemOperand(dst, index)); } @@ -5115,6 +5149,17 @@ void MacroAssembler::AssertFunction(Register object) { } +void MacroAssembler::AssertBoundFunction(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + SmiTst(object, t8); + Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg)); + GetObjectType(object, t8, t8); + Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE)); + } +} + + void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, Register scratch) { if (emit_debug_code()) { @@ -5385,8 +5430,8 @@ void MacroAssembler::JumpIfBlack(Register object, Register scratch0, Register scratch1, Label* on_black) { - HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. - DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); + HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern. + DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); } @@ -5420,28 +5465,6 @@ void MacroAssembler::HasColor(Register object, } -// Detect some, but not all, common pointer-free objects. This is used by the -// incremental write barrier which doesn't care about oddballs (they are always -// marked black immediately so this code is not hit). -void MacroAssembler::JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object) { - DCHECK(!AreAliased(value, scratch, t8, no_reg)); - Label is_data_object; - lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); - LoadRoot(t8, Heap::kHeapNumberMapRootIndex); - Branch(&is_data_object, eq, t8, Operand(scratch)); - DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. - lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); - Branch(not_data_object, ne, t8, Operand(zero_reg)); - bind(&is_data_object); -} - - void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg) { @@ -5457,112 +5480,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg, } -void MacroAssembler::EnsureNotWhite( - Register value, - Register bitmap_scratch, - Register mask_scratch, - Register load_scratch, - Label* value_is_white_and_not_data) { +void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch, + Register mask_scratch, Register load_scratch, + Label* value_is_white) { DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. 
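The marking hunks here and below reflect a change of color encodings; per the updated DCHECKs: white = 00, impossible = 01, grey = 10, black = 11 (black was previously 10). Reading the pattern strings as (first bit, second bit), JumpIfBlack's HasColor(..., 1, 1) now tests both bits. A sketch of that check, under that bit-order assumption:

  // mask selects the object's first mark bit inside the bitmap cell;
  // the second bit is one position above it.
  bool IsBlack(uint32_t cell, uint32_t mask) {
    return (cell & mask) != 0 && (cell & (mask << 1)) != 0;
  }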
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); - DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); - DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); + DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); - Label done; - // Since both black and grey have a 1 in the first position and white does // not have a 1 there we only need to check one bit. lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); And(t8, mask_scratch, load_scratch); - Branch(&done, ne, t8, Operand(zero_reg)); - - if (emit_debug_code()) { - // Check for impossible bit pattern. - Label ok; - // sll may overflow, making the check conservative. - sll(t8, mask_scratch, 1); - And(t8, load_scratch, t8); - Branch(&ok, eq, t8, Operand(zero_reg)); - stop("Impossible marking bit pattern"); - bind(&ok); - } - - // Value is white. We check whether it is data that doesn't need scanning. - // Currently only checks for HeapNumber and non-cons strings. - Register map = load_scratch; // Holds map while checking type. - Register length = load_scratch; // Holds length of object after testing type. - Label is_data_object; - - // Check for heap-number - lw(map, FieldMemOperand(value, HeapObject::kMapOffset)); - LoadRoot(t8, Heap::kHeapNumberMapRootIndex); - { - Label skip; - Branch(&skip, ne, t8, Operand(map)); - li(length, HeapNumber::kSize); - Branch(&is_data_object); - bind(&skip); - } - - // Check for strings. - DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. - Register instance_type = load_scratch; - lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); - And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); - Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg)); - // It's a non-indirect (non-cons and non-slice) string. - // If it's external, the length is just ExternalString::kSize. - // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). - // External strings are the only ones with the kExternalStringTag bit - // set. - DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); - DCHECK_EQ(0, kConsStringTag & kExternalStringTag); - And(t8, instance_type, Operand(kExternalStringTag)); - { - Label skip; - Branch(&skip, eq, t8, Operand(zero_reg)); - li(length, ExternalString::kSize); - Branch(&is_data_object); - bind(&skip); - } - - // Sequential string, either Latin1 or UC16. - // For Latin1 (char-size of 1) we shift the smi tag away to get the length. - // For UC16 (char-size of 2) we just leave the smi tag in place, thereby - // getting the length multiplied by 2. - DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); - DCHECK(kSmiTag == 0 && kSmiTagSize == 1); - lw(t9, FieldMemOperand(value, String::kLengthOffset)); - And(t8, instance_type, Operand(kStringEncodingMask)); - { - Label skip; - Branch(&skip, eq, t8, Operand(zero_reg)); - srl(t9, t9, 1); - bind(&skip); - } - Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); - And(length, length, Operand(~kObjectAlignmentMask)); - - bind(&is_data_object); - // Value is a data object, and it is white. Mark it black. Since we know - // that the object is white we can make it black by flipping one bit. 
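With black and grey both setting the first bit, JumpIfWhite needs only a single-bit test, and that test is all that survives of EnsureNotWhite; the removed tail (classifying heap numbers and non-cons strings as data objects, marking them black, and bumping live bytes) has no replacement here, so callers now simply learn that the value is white. The surviving check, sketched:

  bool IsWhite(uint32_t cell, uint32_t mask) { return (cell & mask) == 0; }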
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - Or(t8, t8, Operand(mask_scratch)); - sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - - And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); - lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); - Addu(t8, t8, Operand(length)); - sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); - - bind(&done); + Branch(value_is_white, eq, t8, Operand(zero_reg)); } @@ -5767,17 +5701,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain( } -bool AreAliased(Register reg1, - Register reg2, - Register reg3, - Register reg4, - Register reg5, - Register reg6, - Register reg7, - Register reg8) { - int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + - reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + - reg7.is_valid() + reg8.is_valid(); +bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4, + Register reg5, Register reg6, Register reg7, Register reg8, + Register reg9, Register reg10) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() + + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() + + reg7.is_valid() + reg8.is_valid() + reg9.is_valid() + + reg10.is_valid(); RegList regs = 0; if (reg1.is_valid()) regs |= reg1.bit(); @@ -5788,18 +5718,19 @@ bool AreAliased(Register reg1, if (reg6.is_valid()) regs |= reg6.bit(); if (reg7.is_valid()) regs |= reg7.bit(); if (reg8.is_valid()) regs |= reg8.bit(); + if (reg9.is_valid()) regs |= reg9.bit(); + if (reg10.is_valid()) regs |= reg10.bit(); int n_of_non_aliasing_regs = NumRegs(regs); return n_of_valid_regs != n_of_non_aliasing_regs; } -CodePatcher::CodePatcher(byte* address, - int instructions, +CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions, FlushICache flush_cache) : address_(address), size_(instructions * Assembler::kInstrSize), - masm_(NULL, address, size_ + Assembler::kGap), + masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo), flush_cache_(flush_cache) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size @@ -5811,7 +5742,7 @@ CodePatcher::CodePatcher(byte* address, CodePatcher::~CodePatcher() { // Indicate that code has changed. if (flush_cache_ == FLUSH) { - CpuFeatures::FlushICache(address_, size_); + Assembler::FlushICache(masm_.isolate(), address_, size_); } // Check that the code was patched as expected. 
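CodePatcher keeps its RAII shape but now threads the isolate through so the destructor can flush via Assembler::FlushICache instead of the static CpuFeatures path. Hypothetical usage (the masm() accessor and the nop() stand-in are assumptions, not part of this diff):

  {
    CodePatcher patcher(isolate, pc, 1);  // patch one instruction
    patcher.masm()->nop();                // hypothetical replacement instr
  }  // destructor flushes [pc, pc + kInstrSize) when flush_cache_ == FLUSH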
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index 8890be8131..4f6a3c868b 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -23,6 +23,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4}; const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5}; const Register kInterpreterDispatchTableRegister = {Register::kCode_t6}; const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0}; +const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3}; const Register kRuntimeCallFunctionRegister = {Register::kCode_a1}; const Register kRuntimeCallArgCountRegister = {Register::kCode_a0}; @@ -98,26 +99,23 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg5 = no_reg, Register reg6 = no_reg); -bool AreAliased(Register reg1, - Register reg2, - Register reg3 = no_reg, - Register reg4 = no_reg, - Register reg5 = no_reg, - Register reg6 = no_reg, - Register reg7 = no_reg, - Register reg8 = no_reg); +bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg, + Register reg4 = no_reg, Register reg5 = no_reg, + Register reg6 = no_reg, Register reg7 = no_reg, + Register reg8 = no_reg, Register reg9 = no_reg, + Register reg10 = no_reg); // ----------------------------------------------------------------------------- // Static helper functions. -inline MemOperand ContextOperand(Register context, int index) { +inline MemOperand ContextMemOperand(Register context, int index) { return MemOperand(context, Context::SlotOffset(index)); } -inline MemOperand GlobalObjectOperand() { - return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); +inline MemOperand NativeContextMemOperand() { + return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX); } @@ -140,11 +138,8 @@ inline MemOperand CFunctionArgumentOperand(int index) { // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: - // The isolate parameter can be NULL if the macro assembler should - // not use isolate-dependent functionality. In this case, it's the - // responsibility of the caller to never invoke such function on the - // macro assembler. - MacroAssembler(Isolate* isolate, void* buffer, int size); + MacroAssembler(Isolate* isolate, void* buffer, int size, + CodeObjectRequired create_code_object); // Arguments macros. #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 @@ -386,22 +381,10 @@ class MacroAssembler: public Assembler { Register scratch1, Label* on_black); - // Checks the color of an object. If the object is already grey or black - // then we just fall through, since it is already live. If it is white and - // we can determine that it doesn't need to be scanned, then we just mark it - // black and fall through. For the rest we jump to the label so the - // incremental marker can fix its assumptions. - void EnsureNotWhite(Register object, - Register scratch1, - Register scratch2, - Register scratch3, - Label* object_is_white_and_not_data); - - // Detects conservatively whether an object is data-only, i.e. it does need to - // be scanned by the garbage collector. - void JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object); + // Checks the color of an object. If the object is white we jump to the + // incremental marker. 
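AreAliased, widened to ten registers in this header, boils down to a popcount comparison: if any two valid registers share a bit in the accumulated RegList, the distinct-bit count drops below the valid-register count. A compact standalone sketch:

  #include <bitset>
  #include <initializer_list>

  bool AreAliased(std::initializer_list<int> reg_codes) {  // -1 means no_reg
    uint32_t bits = 0;
    int valid = 0;
    for (int code : reg_codes)
      if (code >= 0) { ++valid; bits |= 1u << code; }
    return valid != static_cast<int>(std::bitset<32>(bits).count());
  }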
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2, + Register scratch3, Label* value_is_white); // Notify the garbage collector that we wrote a pointer into an object. // |object| is the object being stored into, |value| is the object being @@ -538,12 +521,8 @@ class MacroAssembler: public Assembler { Label* gc_required, AllocationFlags flags); - void Allocate(Register object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags); + void Allocate(Register object_size, Register result, Register result_new, + Register scratch, Label* gc_required, AllocationFlags flags); void AllocateTwoByteString(Register result, Register length, @@ -587,6 +566,12 @@ class MacroAssembler: public Assembler { Register scratch2, Label* gc_required); + // Allocate and initialize a JSValue wrapper with the specified {constructor} + // and {value}. + void AllocateJSValue(Register result, Register constructor, Register value, + Register scratch1, Register scratch2, + Label* gc_required); + // --------------------------------------------------------------------------- // Instruction macros. @@ -648,7 +633,10 @@ class MacroAssembler: public Assembler { #undef DEFINE_INSTRUCTION #undef DEFINE_INSTRUCTION2 +#undef DEFINE_INSTRUCTION3 + void Lsa(Register rd, Register rs, Register rt, uint8_t sa, + Register scratch = at); void Pref(int32_t hint, const MemOperand& rs); @@ -781,7 +769,6 @@ class MacroAssembler: public Assembler { // FPU macros. These do not handle special cases like NaN or +- inf. // Convert unsigned word to double. - void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); // Convert double to unsigned word. @@ -941,8 +928,15 @@ class MacroAssembler: public Assembler { void LoadContext(Register dst, int context_chain_length); + // Load the global object from the current context. + void LoadGlobalObject(Register dst) { + LoadNativeContextSlot(Context::EXTENSION_INDEX, dst); + } + // Load the global proxy from the current context. - void LoadGlobalProxy(Register dst); + void LoadGlobalProxy(Register dst) { + LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); + } // Conditionally load the cached Array transitioned map of type // transitioned_kind from the native context if the map in register @@ -955,7 +949,7 @@ class MacroAssembler: public Assembler { Register scratch, Label* no_map_match); - void LoadGlobalFunction(int index, Register function); + void LoadNativeContextSlot(int index, Register dst); // Load the initial map from the global function. The registers // function and map can be the same, function is then overwritten. @@ -973,15 +967,20 @@ class MacroAssembler: public Assembler { // JavaScript invokes. // Invoke the JavaScript function code by either calling or jumping. - void InvokeCode(Register code, - const ParameterCount& expected, - const ParameterCount& actual, - InvokeFlag flag, - const CallWrapper& call_wrapper); + + void InvokeFunctionCode(Register function, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual, InvokeFlag flag, + const CallWrapper& call_wrapper); + + void FloodFunctionIfStepping(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. 
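AllocateJSValue, declared here and defined in the .cc hunk above, writes exactly four pointer-sized fields; the STATIC_ASSERT pins the object size to match. As a C-style layout sketch:

  struct JSValueLayout {   // JSValue::kSize == 4 * kPointerSize
    void* map;             // the constructor's initial map
    void* properties;      // empty_fixed_array root
    void* elements;        // empty_fixed_array root
    void* value;           // the wrapped primitive
  };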
void InvokeFunction(Register function, + Register new_target, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper); @@ -1021,9 +1020,6 @@ class MacroAssembler: public Assembler { // Must preserve the result register. void PopStackHandler(); - // Copies a fixed number of fields of heap objects from src to dst. - void CopyFields(Register dst, Register src, RegList temps, int field_count); - // Copies a number of bytes from src to dst. All registers are clobbered. On // exit src and dst will point to the place just after where the last byte was // read or written and length will be zero. @@ -1032,12 +1028,11 @@ class MacroAssembler: public Assembler { Register length, Register scratch); - // Initialize fields with filler values. Fields starting at |start_offset| - // not including end_offset are overwritten with the value in |filler|. At - // the end the loop, |start_offset| takes the value of |end_offset|. - void InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler); + // Initialize fields with filler values. Fields starting at |current_address| + // not including |end_address| are overwritten with the value in |filler|. At + // the end the loop, |current_address| takes the value of |end_address|. + void InitializeFieldsWithFiller(Register current_address, + Register end_address, Register filler); // ------------------------------------------------------------------------- // Support functions. @@ -1187,45 +1182,42 @@ class MacroAssembler: public Assembler { // Usage: first call the appropriate arithmetic function, then call one of the // jump functions with the overflow_dst register as the second parameter. - void AdduAndCheckForOverflow(Register dst, - Register left, - Register right, - Register overflow_dst, - Register scratch = at); + inline void AddBranchOvf(Register dst, Register left, const Operand& right, + Label* overflow_label, Register scratch = at) { + AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch); + } - void AdduAndCheckForOverflow(Register dst, Register left, - const Operand& right, Register overflow_dst, - Register scratch = at); + inline void AddBranchNoOvf(Register dst, Register left, const Operand& right, + Label* no_overflow_label, Register scratch = at) { + AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch); + } - void SubuAndCheckForOverflow(Register dst, - Register left, - Register right, - Register overflow_dst, - Register scratch = at); + void AddBranchOvf(Register dst, Register left, const Operand& right, + Label* overflow_label, Label* no_overflow_label, + Register scratch = at); - void SubuAndCheckForOverflow(Register dst, Register left, - const Operand& right, Register overflow_dst, - Register scratch = at); + void AddBranchOvf(Register dst, Register left, Register right, + Label* overflow_label, Label* no_overflow_label, + Register scratch = at); - void BranchOnOverflow(Label* label, - Register overflow_check, - BranchDelaySlot bd = PROTECT) { - Branch(label, lt, overflow_check, Operand(zero_reg), bd); - } - void BranchOnNoOverflow(Label* label, - Register overflow_check, - BranchDelaySlot bd = PROTECT) { - Branch(label, ge, overflow_check, Operand(zero_reg), bd); + inline void SubBranchOvf(Register dst, Register left, const Operand& right, + Label* overflow_label, Register scratch = at) { + SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch); } - void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) { - Ret(lt, overflow_check, 
Operand(zero_reg), bd); + inline void SubBranchNoOvf(Register dst, Register left, const Operand& right, + Label* no_overflow_label, Register scratch = at) { + SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch); } - void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) { - Ret(ge, overflow_check, Operand(zero_reg), bd); - } + void SubBranchOvf(Register dst, Register left, const Operand& right, + Label* overflow_label, Label* no_overflow_label, + Register scratch = at); + + void SubBranchOvf(Register dst, Register left, Register right, + Label* overflow_label, Label* no_overflow_label, + Register scratch = at); // ------------------------------------------------------------------------- // Runtime calls. @@ -1262,6 +1254,14 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT } // Convenience function: Same as above, but takes the fid instead. + void CallRuntime(Runtime::FunctionId fid, + SaveFPRegsMode save_doubles = kDontSaveFPRegs, + BranchDelaySlot bd = PROTECT) { + const Runtime::Function* function = Runtime::FunctionForId(fid); + CallRuntime(function, function->nargs, save_doubles, bd); + } + + // Convenience function: Same as above, but takes the fid instead. void CallRuntime(Runtime::FunctionId id, int num_arguments, SaveFPRegsMode save_doubles = kDontSaveFPRegs, BranchDelaySlot bd = PROTECT) { @@ -1273,17 +1273,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT int num_arguments, BranchDelaySlot bd = PROTECT); - // Tail call of a runtime routine (jump). - // Like JumpToExternalReference, but also takes care of passing the number - // of parameters. - void TailCallExternalReference(const ExternalReference& ext, - int num_arguments, - int result_size); // Convenience function: tail call a runtime routine (jump). - void TailCallRuntime(Runtime::FunctionId fid, - int num_arguments, - int result_size); + void TailCallRuntime(Runtime::FunctionId fid); int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments); @@ -1339,13 +1331,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT void InvokeBuiltin(int native_context_index, InvokeFlag flag, const CallWrapper& call_wrapper = NullCallWrapper()); - // Store the code object for the given builtin in the target register and - // setup the function in a1. - void GetBuiltinEntry(Register target, int native_context_index); - - // Store the function for the given builtin in the target register. - void GetBuiltinFunction(Register target, int native_context_index); - struct Unresolved { int pc; uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders. @@ -1411,14 +1396,23 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT Addu(reg, reg, reg); } + void SmiTag(Register dst, Register src) { Addu(dst, src, src); } + // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). void SmiTagCheckOverflow(Register reg, Register overflow); void SmiTagCheckOverflow(Register dst, Register src, Register overflow); - void SmiTag(Register dst, Register src) { - Addu(dst, src, src); + void BranchOnOverflow(Label* label, Register overflow_check, + BranchDelaySlot bd = PROTECT) { + Branch(label, lt, overflow_check, Operand(zero_reg), bd); } + void BranchOnNoOverflow(Label* label, Register overflow_check, + BranchDelaySlot bd = PROTECT) { + Branch(label, ge, overflow_check, Operand(zero_reg), bd); + } + + // Try to convert int32 to smi. 
If the value is to large, preserve // the original value and jump to not_a_smi. Destroys scratch and // sets flags. @@ -1488,6 +1482,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT // Abort execution if argument is not a JSFunction, enabled via --debug-code. void AssertFunction(Register object); + // Abort execution if argument is not a JSBoundFunction, + // enabled via --debug-code. + void AssertBoundFunction(Register object); + // Abort execution if argument is not undefined or an AllocationSite, enabled // via --debug-code. void AssertUndefinedOrAllocationSite(Register object, Register scratch); @@ -1674,8 +1672,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT // Helper functions for generating invokes. void InvokePrologue(const ParameterCount& expected, const ParameterCount& actual, - Handle<Code> code_constant, - Register code_reg, Label* done, bool* definitely_mismatches, InvokeFlag flag, @@ -1729,8 +1725,7 @@ class CodePatcher { DONT_FLUSH }; - CodePatcher(byte* address, - int instructions, + CodePatcher(Isolate* isolate, byte* address, int instructions, FlushICache flush_cache = FLUSH); ~CodePatcher(); diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc index e9dd0d32dc..aa4224a54c 100644 --- a/deps/v8/src/mips/simulator-mips.cc +++ b/deps/v8/src/mips/simulator-mips.cc @@ -589,7 +589,7 @@ void MipsDebugger::Debug() { reinterpret_cast<intptr_t>(cur), *cur, *cur); HeapObject* obj = reinterpret_cast<HeapObject*>(*cur); int value = *cur; - Heap* current_heap = v8::internal::Isolate::Current()->heap(); + Heap* current_heap = sim_->isolate_->heap(); if (((value & 1) == 0) || current_heap->Contains(obj)) { PrintF(" ("); if ((value & 1) == 0) { @@ -968,7 +968,12 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { for (int i = 0; i < kNumFPURegisters; i++) { FPUregisters_[i] = 0; } - FCSR_ = 0; + if (IsMipsArchVariant(kMips32r6)) { + FCSR_ = kFCSRNaN2008FlagMask; + } else { + DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2)); + FCSR_ = 0; + } // The sp is initialized to point to the bottom (high address) of the // allocated stack area. To be safe in potential stack underflows we leave @@ -995,12 +1000,12 @@ Simulator::~Simulator() { free(stack_); } // offset from the swi instruction so the simulator knows what to call. 
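The simulator now models the r6 NaN2008 requirement from reset onward; the constructor change condenses to:

  // MIPS32r6 hard-wires FCSR.NAN2008; r1/r2 keep the legacy NaN encoding.
  FCSR_ = IsMipsArchVariant(kMips32r6) ? kFCSRNaN2008FlagMask : 0;

The CTC1 hunk further down enforces the same invariant on every guest write to FCSR, OR-ing the bit in on r6 and masking it out otherwise.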
class Redirection { public: - Redirection(void* external_function, ExternalReference::Type type) + Redirection(Isolate* isolate, void* external_function, + ExternalReference::Type type) : external_function_(external_function), swi_instruction_(rtCallRedirInstr), type_(type), next_(NULL) { - Isolate* isolate = Isolate::Current(); next_ = isolate->simulator_redirection(); Simulator::current(isolate)-> FlushICache(isolate->simulator_i_cache(), @@ -1016,14 +1021,13 @@ class Redirection { void* external_function() { return external_function_; } ExternalReference::Type type() { return type_; } - static Redirection* Get(void* external_function, + static Redirection* Get(Isolate* isolate, void* external_function, ExternalReference::Type type) { - Isolate* isolate = Isolate::Current(); Redirection* current = isolate->simulator_redirection(); for (; current != NULL; current = current->next_) { if (current->external_function_ == external_function) return current; } - return new Redirection(external_function, type); + return new Redirection(isolate, external_function, type); } static Redirection* FromSwiInstruction(Instruction* swi_instruction) { @@ -1068,9 +1072,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) { } -void* Simulator::RedirectExternalReference(void* external_function, +void* Simulator::RedirectExternalReference(Isolate* isolate, + void* external_function, ExternalReference::Type type) { - Redirection* redirection = Redirection::Get(external_function, type); + Redirection* redirection = Redirection::Get(isolate, external_function, type); return redirection->address_of_swi_instruction(); } @@ -1296,6 +1301,129 @@ unsigned int Simulator::get_fcsr_rounding_mode() { } +void Simulator::set_fpu_register_word_invalid_result(float original, + float rounded) { + if (FCSR_ & kFCSRNaN2008FlagMask) { + double max_int32 = std::numeric_limits<int32_t>::max(); + double min_int32 = std::numeric_limits<int32_t>::min(); + if (std::isnan(original)) { + set_fpu_register_word(fd_reg(), 0); + } else if (rounded > max_int32) { + set_fpu_register_word(fd_reg(), kFPUInvalidResult); + } else if (rounded < min_int32) { + set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); + } else { + UNREACHABLE(); + } + } else { + set_fpu_register_word(fd_reg(), kFPUInvalidResult); + } +} + + +void Simulator::set_fpu_register_invalid_result(float original, float rounded) { + if (FCSR_ & kFCSRNaN2008FlagMask) { + double max_int32 = std::numeric_limits<int32_t>::max(); + double min_int32 = std::numeric_limits<int32_t>::min(); + if (std::isnan(original)) { + set_fpu_register(fd_reg(), 0); + } else if (rounded > max_int32) { + set_fpu_register(fd_reg(), kFPUInvalidResult); + } else if (rounded < min_int32) { + set_fpu_register(fd_reg(), kFPUInvalidResultNegative); + } else { + UNREACHABLE(); + } + } else { + set_fpu_register(fd_reg(), kFPUInvalidResult); + } +} + + +void Simulator::set_fpu_register_invalid_result64(float original, + float rounded) { + if (FCSR_ & kFCSRNaN2008FlagMask) { + // The value of INT64_MAX (2^63-1) can't be represented as double exactly, + // loading the most accurate representation into max_int64, which is 2^63. 
+ double max_int64 = std::numeric_limits<int64_t>::max(); + double min_int64 = std::numeric_limits<int64_t>::min(); + if (std::isnan(original)) { + set_fpu_register(fd_reg(), 0); + } else if (rounded >= max_int64) { + set_fpu_register(fd_reg(), kFPU64InvalidResult); + } else if (rounded < min_int64) { + set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); + } else { + UNREACHABLE(); + } + } else { + set_fpu_register(fd_reg(), kFPU64InvalidResult); + } +} + + +void Simulator::set_fpu_register_word_invalid_result(double original, + double rounded) { + if (FCSR_ & kFCSRNaN2008FlagMask) { + double max_int32 = std::numeric_limits<int32_t>::max(); + double min_int32 = std::numeric_limits<int32_t>::min(); + if (std::isnan(original)) { + set_fpu_register_word(fd_reg(), 0); + } else if (rounded > max_int32) { + set_fpu_register_word(fd_reg(), kFPUInvalidResult); + } else if (rounded < min_int32) { + set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); + } else { + UNREACHABLE(); + } + } else { + set_fpu_register_word(fd_reg(), kFPUInvalidResult); + } +} + + +void Simulator::set_fpu_register_invalid_result(double original, + double rounded) { + if (FCSR_ & kFCSRNaN2008FlagMask) { + double max_int32 = std::numeric_limits<int32_t>::max(); + double min_int32 = std::numeric_limits<int32_t>::min(); + if (std::isnan(original)) { + set_fpu_register(fd_reg(), 0); + } else if (rounded > max_int32) { + set_fpu_register(fd_reg(), kFPUInvalidResult); + } else if (rounded < min_int32) { + set_fpu_register(fd_reg(), kFPUInvalidResultNegative); + } else { + UNREACHABLE(); + } + } else { + set_fpu_register(fd_reg(), kFPUInvalidResult); + } +} + + +void Simulator::set_fpu_register_invalid_result64(double original, + double rounded) { + if (FCSR_ & kFCSRNaN2008FlagMask) { + // The value of INT64_MAX (2^63-1) can't be represented as double exactly, + // loading the most accurate representation into max_int64, which is 2^63. + double max_int64 = std::numeric_limits<int64_t>::max(); + double min_int64 = std::numeric_limits<int64_t>::min(); + if (std::isnan(original)) { + set_fpu_register(fd_reg(), 0); + } else if (rounded >= max_int64) { + set_fpu_register(fd_reg(), kFPU64InvalidResult); + } else if (rounded < min_int64) { + set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); + } else { + UNREACHABLE(); + } + } else { + set_fpu_register(fd_reg(), kFPU64InvalidResult); + } +} + + // Sets the rounding error codes in FCSR based on the result of the rounding. // Returns true if the operation was invalid. bool Simulator::set_fcsr_round_error(double original, double rounded) { @@ -1332,6 +1460,8 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) { // Returns true if the operation was invalid. bool Simulator::set_fcsr_round64_error(double original, double rounded) { bool ret = false; + // The value of INT64_MAX (2^63-1) can't be represented as double exactly, + // loading the most accurate representation into max_int64, which is 2^63. 
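All six set_fpu_register_*_invalid_result helpers share one shape: with NaN2008 set the result saturates by cause, otherwise the legacy fixed pattern is written regardless. Assuming kFPUInvalidResult and kFPUInvalidResultNegative are the int32 max and min bit patterns (with kFPU64 analogues on the 64-bit paths), the 32-bit case is:

  #include <cmath>
  #include <cstdint>
  #include <limits>

  int32_t InvalidResult32(double original, double rounded, bool nan2008) {
    const double max_int32 = std::numeric_limits<int32_t>::max();
    if (!nan2008) return kFPUInvalidResult;    // legacy: one fixed pattern
    if (std::isnan(original)) return 0;        // NaN2008: NaN converts to 0
    // The real helpers UNREACHABLE() unless rounded is out of range below.
    return rounded > max_int32 ? kFPUInvalidResult           // saturate high
                               : kFPUInvalidResultNegative;  // saturate low
  }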
double max_int64 = std::numeric_limits<int64_t>::max(); double min_int64 = std::numeric_limits<int64_t>::min(); @@ -1349,7 +1479,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) { ret = true; } - if (rounded > max_int64 || rounded < min_int64) { + if (rounded >= max_int64 || rounded < min_int64) { set_fcsr_bit(kFCSROverflowFlagBit, true); // The reference is not really clear but it seems this is required: set_fcsr_bit(kFCSRInvalidOpFlagBit, true); @@ -1396,6 +1526,8 @@ bool Simulator::set_fcsr_round_error(float original, float rounded) { // Returns true if the operation was invalid. bool Simulator::set_fcsr_round64_error(float original, float rounded) { bool ret = false; + // The value of INT64_MAX (2^63-1) can't be represented as double exactly, + // loading the most accurate representation into max_int64, which is 2^63. double max_int64 = std::numeric_limits<int64_t>::max(); double min_int64 = std::numeric_limits<int64_t>::min(); @@ -1413,7 +1545,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) { ret = true; } - if (rounded > max_int64 || rounded < min_int64) { + if (rounded >= max_int64 || rounded < min_int64) { set_fcsr_bit(kFCSROverflowFlagBit, true); // The reference is not really clear but it seems this is required: set_fcsr_bit(kFCSRInvalidOpFlagBit, true); @@ -2372,11 +2504,13 @@ void Simulator::DecodeTypeRegisterDRsType() { set_fpu_register_double(fd_reg(), -fs); break; case SQRT_D: - set_fpu_register_double(fd_reg(), fast_sqrt(fs)); + lazily_initialize_fast_sqrt(isolate_); + set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_)); break; case RSQRT_D: { DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - double result = 1.0 / fast_sqrt(fs); + lazily_initialize_fast_sqrt(isolate_); + double result = 1.0 / fast_sqrt(fs, isolate_); set_fpu_register_double(fd_reg(), result); break; } @@ -2413,7 +2547,7 @@ void Simulator::DecodeTypeRegisterDRsType() { round_according_to_fcsr(fs, rounded, result, fs); set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case ROUND_W_D: // Round double to word (round half to even). @@ -2427,7 +2561,7 @@ void Simulator::DecodeTypeRegisterDRsType() { } set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case TRUNC_W_D: // Truncate double to word (round towards 0). @@ -2436,7 +2570,7 @@ void Simulator::DecodeTypeRegisterDRsType() { int32_t result = static_cast<int32_t>(rounded); set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case FLOOR_W_D: // Round double to word towards negative infinity. @@ -2445,7 +2579,7 @@ void Simulator::DecodeTypeRegisterDRsType() { int32_t result = static_cast<int32_t>(rounded); set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case CEIL_W_D: // Round double to word towards positive infinity. 
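The > to >= change in set_fcsr_round64_error fixes a boundary case that the comment added above spells out: INT64_MAX has no exact double representation, so max_int64 actually holds 2^63, a value that itself already overflows int64. On an IEEE-754 target rounding to nearest:

  #include <cstdint>
  #include <limits>

  static_assert(static_cast<double>(std::numeric_limits<int64_t>::max()) ==
                    9223372036854775808.0,  // 2^63, one past INT64_MAX
                "INT64_MAX rounds up to 2^63 when converted to double");

The 32-bit paths keep plain > because every int32 is exactly representable as a double.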
@@ -2454,7 +2588,7 @@ void Simulator::DecodeTypeRegisterDRsType() { int32_t result = static_cast<int32_t>(rounded); set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case CVT_S_D: // Convert double to float (single). @@ -2467,7 +2601,7 @@ void Simulator::DecodeTypeRegisterDRsType() { round64_according_to_fcsr(fs, rounded, result, fs); set_fpu_register(fd_reg(), result); if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register(fd_reg(), kFPU64InvalidResult); + set_fpu_register_invalid_result64(fs, rounded); } } else { UNSUPPORTED(); @@ -2482,7 +2616,7 @@ void Simulator::DecodeTypeRegisterDRsType() { if (IsFp64Mode()) { set_fpu_register(fd_reg(), i64); if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register(fd_reg(), kFPU64InvalidResult); + set_fpu_register_invalid_result64(fs, rounded); } } else { UNSUPPORTED(); @@ -2502,7 +2636,7 @@ void Simulator::DecodeTypeRegisterDRsType() { if (IsFp64Mode()) { set_fpu_register(fd_reg(), i64); if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register(fd_reg(), kFPU64InvalidResult); + set_fpu_register_invalid_result64(fs, rounded); } } else { UNSUPPORTED(); @@ -2516,7 +2650,7 @@ void Simulator::DecodeTypeRegisterDRsType() { if (IsFp64Mode()) { set_fpu_register(fd_reg(), i64); if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register(fd_reg(), kFPU64InvalidResult); + set_fpu_register_invalid_result64(fs, rounded); } } else { UNSUPPORTED(); @@ -2530,7 +2664,7 @@ void Simulator::DecodeTypeRegisterDRsType() { if (IsFp64Mode()) { set_fpu_register(fd_reg(), i64); if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register(fd_reg(), kFPU64InvalidResult); + set_fpu_register_invalid_result64(fs, rounded); } } else { UNSUPPORTED(); @@ -2775,11 +2909,13 @@ void Simulator::DecodeTypeRegisterSRsType() { set_fpu_register_float(fd_reg(), -fs); break; case SQRT_S: - set_fpu_register_float(fd_reg(), fast_sqrt(fs)); + lazily_initialize_fast_sqrt(isolate_); + set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_)); break; case RSQRT_S: { DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - float result = 1.0 / fast_sqrt(fs); + lazily_initialize_fast_sqrt(isolate_); + float result = 1.0 / fast_sqrt(fs, isolate_); set_fpu_register_float(fd_reg(), result); break; } @@ -2931,7 +3067,7 @@ void Simulator::DecodeTypeRegisterSRsType() { int32_t result = static_cast<int32_t>(rounded); set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case TRUNC_L_S: { // Mips32r2 instruction. @@ -2941,7 +3077,7 @@ void Simulator::DecodeTypeRegisterSRsType() { if (IsFp64Mode()) { set_fpu_register(fd_reg(), i64); if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register(fd_reg(), kFPU64InvalidResult); + set_fpu_register_invalid_result64(fs, rounded); } } else { UNSUPPORTED(); @@ -2954,7 +3090,7 @@ void Simulator::DecodeTypeRegisterSRsType() { int32_t result = static_cast<int32_t>(rounded); set_fpu_register_word(fd_reg(), result); if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word(fd_reg(), kFPUInvalidResult); + set_fpu_register_word_invalid_result(fs, rounded); } } break; case FLOOR_L_S: { // Mips32r2 instruction. 
@@ -2964,7 +3100,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       if (IsFp64Mode()) {
         set_fpu_register(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register(fd_reg(), kFPU64InvalidResult);
+          set_fpu_register_invalid_result64(fs, rounded);
         }
       } else {
         UNSUPPORTED();
@@ -2981,7 +3117,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       }
       set_fpu_register_word(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+        set_fpu_register_word_invalid_result(fs, rounded);
       }
       break;
     }
@@ -2998,7 +3134,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       if (IsFp64Mode()) {
         set_fpu_register(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register(fd_reg(), kFPU64InvalidResult);
+          set_fpu_register_invalid_result64(fs, rounded);
         }
       } else {
         UNSUPPORTED();
@@ -3011,7 +3147,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       int32_t result = static_cast<int32_t>(rounded);
       set_fpu_register_word(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+        set_fpu_register_word_invalid_result(fs, rounded);
       }
     } break;
     case CEIL_L_S: {  // Mips32r2 instruction.
@@ -3021,7 +3157,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       if (IsFp64Mode()) {
         set_fpu_register(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register(fd_reg(), kFPU64InvalidResult);
+          set_fpu_register_invalid_result64(fs, rounded);
         }
       } else {
         UNSUPPORTED();
@@ -3103,7 +3239,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       round64_according_to_fcsr(fs, rounded, result, fs);
       set_fpu_register(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
-        set_fpu_register(fd_reg(), kFPU64InvalidResult);
+        set_fpu_register_invalid_result64(fs, rounded);
       }
     } else {
       UNSUPPORTED();
@@ -3116,7 +3252,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
       round_according_to_fcsr(fs, rounded, result, fs);
       set_fpu_register_word(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+        set_fpu_register_word_invalid_result(fs, rounded);
       }
       break;
     }
@@ -3245,11 +3381,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
     case MFHC1:
       set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
       break;
-    case CTC1:
+    case CTC1: {
       // At the moment only FCSR is supported.
       DCHECK(fs_reg() == kFCSRRegister);
-      FCSR_ = registers_[rt_reg()];
+      int32_t reg = registers_[rt_reg()];
+      if (IsMipsArchVariant(kMips32r6)) {
+        FCSR_ = reg | kFCSRNaN2008FlagMask;
+      } else {
+        DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
+        FCSR_ = reg & ~kFCSRNaN2008FlagMask;
+      }
       break;
+    }
     case MTC1:
       // Hardware writes upper 32-bits to zero on mtc1.
       set_fpu_register_hi_word(fs_reg(), 0);
@@ -3371,9 +3514,19 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
       SetResult(rd_reg(), static_cast<int32_t>(alu_out));
       break;
     case SRAV:
-      alu_out = rt() >> rs();
-      SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+      SetResult(rd_reg(), rt() >> rs());
+      break;
+    case LSA: {
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      int8_t sa = lsa_sa() + 1;
+      int32_t _rt = rt();
+      int32_t _rs = rs();
+      int32_t res = _rs << sa;
+      res += _rt;
+      DCHECK_EQ(res, (rs() << (lsa_sa() + 1)) + rt());
+      SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt());
       break;
+    }
     case MFHI:  // MFHI == CLZ on R6.
       if (!IsMipsArchVariant(kMips32r6)) {
         DCHECK(sa() == 0);
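Two MIPS32r6-specific behaviours land in the hunks above. CTC1 now forces the FCSR NaN2008 bit on for r6 and off for r1/r2, since r6 mandates the 2008 NaN encoding while the older revisions use the legacy one. And the new LSA (left shift add) instruction computes rd = (rs << (sa + 1)) + rt, where the 2-bit sa field encodes shift amounts 1 through 4. A self-contained model of the LSA semantics (the helper name is illustrative, not V8's):

#include <cstdint>

// Models MIPS32r6 LSA. Arithmetic is done in uint32_t so overflow wraps
// around the way the hardware's 32-bit adder does, without relying on
// signed-overflow behaviour in C++.
int32_t Lsa(int32_t rs, int32_t rt, int sa_field /* 0..3 */) {
  uint32_t shifted = static_cast<uint32_t>(rs) << (sa_field + 1);
  return static_cast<int32_t>(shifted + static_cast<uint32_t>(rt));
}

// Example: indexing into an array of 8-byte elements,
// Lsa(index, base, 2) == base + index * 8.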
@@ -3947,7 +4100,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
       if (rs_reg != 0) {  // BEQZC
         BranchCompactHelper(rs == 0, 21);
       } else {  // JIC
-        CheckForbiddenSlot(get_pc());
         next_pc = rt + imm16;
       }
       break;
@@ -3955,9 +4107,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
       if (rs_reg != 0) {  // BNEZC
         BranchCompactHelper(rs != 0, 21);
       } else {  // JIALC
-        int32_t current_pc = get_pc();
-        CheckForbiddenSlot(current_pc);
-        set_register(31, current_pc + Instruction::kInstrSize);
+        set_register(31, get_pc() + Instruction::kInstrSize);
         next_pc = rt + imm16;
       }
       break;
@@ -4040,7 +4190,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
       SetResult(rt_reg, rs ^ oe_imm16);
       break;
     case LUI:
-      SetResult(rt_reg, oe_imm16 << 16);
+      if (rs_reg != 0) {
+        // AUI
+        DCHECK(IsMipsArchVariant(kMips32r6));
+        SetResult(rt_reg, rs + (se_imm16 << 16));
+      } else {
+        // LUI
+        SetResult(rt_reg, oe_imm16 << 16);
+      }
       break;
     // ------------- Memory instructions.
     case LB:
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 624d4acf80..8efe0bba9c 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -23,7 +23,7 @@ namespace v8 {
 namespace internal {
 
 // When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
   entry(p0, p1, p2, p3, p4)
 
 typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
@@ -34,9 +34,10 @@ typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
-      p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+                                   p7, p8)                                     \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+                                             p7, p8))
 
 // The stack limit beyond which we will throw stack overflow errors in
 // generated code. Because generated code on mips uses the C stack, we
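In the LUI case above, r6 reuses the LUI opcode for AUI, with the rs field distinguishing the two: rs == 0 keeps the classic load-upper-immediate, while rs != 0 adds the sign-extended immediate, shifted left 16, to rs. A standalone model of that decode (the function name is illustrative; the casts keep the shifts on unsigned values to avoid signed-shift pitfalls the simulator tolerates in practice):

#include <cstdint>

// rs_reg is the register index from the instruction; rs is its value.
int32_t DecodeLuiAui(int32_t rs_reg, int32_t rs, uint16_t imm16) {
  if (rs_reg != 0) {
    // AUI (MIPS32r6 only): rt = rs + (sign_extend(imm16) << 16).
    int32_t se_imm16 = static_cast<int16_t>(imm16);
    return rs + static_cast<int32_t>(static_cast<uint32_t>(se_imm16) << 16);
  }
  // Classic LUI: rt = zero_extend(imm16) << 16.
  return static_cast<int32_t>(static_cast<uint32_t>(imm16) << 16);
}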
@@ -48,11 +49,13 @@ class SimulatorStack : public v8::internal::AllStatic {
     return c_limit;
   }
 
-  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+  static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+                                            uintptr_t try_catch_address) {
+    USE(isolate);
     return try_catch_address;
   }
 
-  static inline void UnregisterCTryCatch() { }
+  static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
 };
 
 }  // namespace internal
@@ -168,6 +171,12 @@ class Simulator {
   void set_fpu_register_hi_word(int fpureg, int32_t value);
   void set_fpu_register_float(int fpureg, float value);
   void set_fpu_register_double(int fpureg, double value);
+  void set_fpu_register_invalid_result64(float original, float rounded);
+  void set_fpu_register_invalid_result(float original, float rounded);
+  void set_fpu_register_word_invalid_result(float original, float rounded);
+  void set_fpu_register_invalid_result64(double original, double rounded);
+  void set_fpu_register_invalid_result(double original, double rounded);
+  void set_fpu_register_word_invalid_result(double original, double rounded);
   int64_t get_fpu_register(int fpureg) const;
   int32_t get_fpu_register_word(int fpureg) const;
   int32_t get_fpu_register_signed_word(int fpureg) const;
@@ -335,6 +344,7 @@ class Simulator {
   inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
   inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
   inline int32_t sa() const { return currentInstr_->SaValue(); }
+  inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
 
   inline void SetResult(int32_t rd_reg, int32_t alu_out) {
     set_register(rd_reg, alu_out);
@@ -408,7 +418,8 @@ class Simulator {
   void SignalException(Exception e);
 
   // Runtime call support.
-  static void* RedirectExternalReference(void* external_function,
+  static void* RedirectExternalReference(Isolate* isolate,
+                                         void* external_function,
                                          ExternalReference::Type type);
 
   // Handle arguments and return value for runtime FP functions.
@@ -464,13 +475,14 @@ class Simulator {
 // When running with the simulator transition into simulated execution at this
 // point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+  reinterpret_cast<Object*>(Simulator::current(isolate)->Call(  \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  Simulator::current(Isolate::Current())->Call( \
-      entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+                                   p7, p8)                                     \
+  Simulator::current(isolate)                                                  \
+      ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
 
 // The simulator has its own stack. Thus it has a different stack limit from
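A pattern running through this whole file: the isolate is now threaded explicitly through CALL_GENERATED_CODE, RegisterCTryCatch, and RedirectExternalReference instead of being fetched via Isolate::Current(), so these paths no longer depend on a thread-local lookup. The native RegisterCTryCatch above takes the isolate only to keep its signature identical to the simulator build's; USE(isolate) is V8's no-op for silencing unused-parameter warnings. A minimal equivalent of that idiom:

#include <cstdint>

// Minimal stand-in for V8's USE(): evaluate and discard the argument,
// suppressing unused-parameter warnings in the build that ignores it.
template <typename T>
inline void USE(const T&) {}

struct Isolate;  // opaque for this sketch

// Native build: generated code runs on the C stack, so the try/catch
// address needs no translation; the isolate exists only for API parity
// with the simulator build shown further below.
static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
                                          uintptr_t try_catch_address) {
  USE(isolate);
  return try_catch_address;
}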
@@ -484,13 +496,14 @@ class SimulatorStack : public v8::internal::AllStatic {
     return Simulator::current(isolate)->StackLimit(c_limit);
   }
 
-  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
-    Simulator* sim = Simulator::current(Isolate::Current());
+  static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+                                            uintptr_t try_catch_address) {
+    Simulator* sim = Simulator::current(isolate);
     return sim->PushAddress(try_catch_address);
   }
 
-  static inline void UnregisterCTryCatch() {
-    Simulator::current(Isolate::Current())->PopAddress();
+  static inline void UnregisterCTryCatch(Isolate* isolate) {
+    Simulator::current(isolate)->PopAddress();
   }
 };
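Under the simulator, by contrast, generated code runs on a separate simulated stack, so the try/catch address has to be pushed onto that stack and popped again on unwind, which is why this variant delegates to PushAddress and PopAddress. A toy model of that pairing (only the Push/Pop names come from the diff; the class and its internals are illustrative guesses at the mechanism):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy model of a downward-growing simulated stack like the simulator's.
class ToySimulatorStack {
 public:
  explicit ToySimulatorStack(size_t slots) : stack_(slots), sp_(slots) {}

  // Mirrors the role of Simulator::PushAddress: reserve a slot on the
  // simulated stack, store the C++ try/catch address there, and return
  // the new stack position so the unwinder can find it later.
  uintptr_t PushAddress(uintptr_t address) {
    stack_[--sp_] = address;
    return sp_;
  }

  // Mirrors Simulator::PopAddress: drop the slot on UnregisterCTryCatch.
  void PopAddress() { ++sp_; }

 private:
  std::vector<uintptr_t> stack_;
  size_t sp_;  // index of the current top-of-stack slot
};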