author     Chris Dickinson <christopher.s.dickinson@gmail.com>   2015-05-05 13:48:55 -0700
committer  Rod Vagg <rod@vagg.org>                               2015-08-04 11:56:09 -0700
commit     d58e780504bdba6c5897c48428fd984c5b5f96fe (patch)
tree       033f1568ae3f9f077aceb843b42eb1ed1739ce0f /deps/v8/src/ppc
parent     21d31c08e7d0b6865e52452750b20b05e6dca443 (diff)
download   node-new-d58e780504bdba6c5897c48428fd984c5b5f96fe.tar.gz
deps: update v8 to 4.3.61.21
* @indutny's SealHandleScope patch (484bebc38319fc7c622478037922ad73b2edcbf9)
  has been cherry-picked onto the top of V8 to make it compile.
* There's some test breakage in contextify.
* This was merged at the request of the TC.
PR-URL: https://github.com/iojs/io.js/pull/1632
Diffstat (limited to 'deps/v8/src/ppc')
-rw-r--r--  deps/v8/src/ppc/OWNERS                        |    3
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h           |  172
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc              |  747
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h               |  220
-rw-r--r--  deps/v8/src/ppc/builtins-ppc.cc               |  298
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc             |  538
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc                |    6
-rw-r--r--  deps/v8/src/ppc/debug-ppc.cc                  |   47
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc            |   10
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc                 |    9
-rw-r--r--  deps/v8/src/ppc/frames-ppc.cc                 |   16
-rw-r--r--  deps/v8/src/ppc/frames-ppc.h                  |   15
-rw-r--r--  deps/v8/src/ppc/full-codegen-ppc.cc           |  342
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc  |    6
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.cc        |  193
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.cc                |   23
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.h                 |   32
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc        |  354
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h         |   45
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc              |   22
20 files changed, 1494 insertions(+), 1604 deletions(-)
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
new file mode 100644
index 0000000000..beecb3d0b1
--- /dev/null
+++ b/deps/v8/src/ppc/OWNERS
@@ -0,0 +1,3 @@
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 6779ee3d88..d95c7ec596 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -51,14 +51,36 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }

 void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-  if (RelocInfo::IsInternalReference(rmode_)) {
-    // absolute code pointer inside code object moves with the code object.
-    Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
+  // absolute code pointer inside code object moves with the code object.
+  if (IsInternalReference(rmode_)) {
+    // Jump table entry
+    Address target = Memory::Address_at(pc_);
+    Memory::Address_at(pc_) = target + delta;
+  } else {
+    // mov sequence
+    DCHECK(IsInternalReferenceEncoded(rmode_));
+    Address target = Assembler::target_address_at(pc_, host_);
+    Assembler::set_target_address_at(pc_, host_, target + delta,
+                                     icache_flush_mode);
+  }
+}
+
+
+Address RelocInfo::target_internal_reference() {
+  if (IsInternalReference(rmode_)) {
+    // Jump table entry
+    return Memory::Address_at(pc_);
+  } else {
+    // mov sequence
+    DCHECK(IsInternalReferenceEncoded(rmode_));
+    return Assembler::target_address_at(pc_, host_);
   }
-#endif
-  // We do not use pc relative addressing on PPC, so there is
-  // nothing else to do.
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+  DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+  return reinterpret_cast<Address>(pc_);
 }


@@ -72,14 +94,6 @@ Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
          rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);

-#if V8_OOL_CONSTANT_POOL
-  if (Assembler::IsConstantPoolLoadStart(pc_)) {
-    // We return the PC for ool constant pool since this function is used by the
-    // serializerer and expects the address to reside within the code object.
-    return reinterpret_cast<Address>(pc_);
-  }
-#endif
-
   // Read the address of the word containing the target_address in an
   // instruction stream.
   // The only architecture-independent user of this function is the serializer.
@@ -94,13 +108,8 @@ Address RelocInfo::target_address_address() {


 Address RelocInfo::constant_pool_entry_address() {
-#if V8_OOL_CONSTANT_POOL
-  return Assembler::target_constant_pool_address_at(pc_,
-                                                    host_->constant_pool());
-#else
   UNREACHABLE();
   return NULL;
-#endif
 }


@@ -134,22 +143,12 @@ Address Assembler::target_address_from_return_address(Address pc) {
 //  mtlr ip
 //  blrl
 //                      @ return address
-#if V8_OOL_CONSTANT_POOL
-  if (IsConstantPoolLoadEnd(pc - 3 * kInstrSize)) {
-    return pc - (kMovInstructionsConstantPool + 2) * kInstrSize;
-  }
-#endif
-  return pc - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+  return pc - (kMovInstructions + 2) * kInstrSize;
 }


 Address Assembler::return_address_from_call_start(Address pc) {
-#if V8_OOL_CONSTANT_POOL
-  Address load_address = pc + (kMovInstructionsConstantPool - 1) * kInstrSize;
-  if (IsConstantPoolLoadEnd(load_address))
-    return pc + (kMovInstructionsConstantPool + 2) * kInstrSize;
-#endif
-  return pc + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+  return pc + (kMovInstructions + 2) * kInstrSize;
 }


@@ -180,7 +179,7 @@ void RelocInfo::set_target_object(Object* target,
 }


-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
   DCHECK(rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_address_at(pc_, host_);
 }
@@ -227,13 +226,8 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
 }


-#if V8_OOL_CONSTANT_POOL
-static const int kNoCodeAgeInstructions = 7;
-#else
 static const int kNoCodeAgeInstructions = 6;
-#endif
-static const int kCodeAgingInstructions =
-    Assembler::kMovInstructionsNoConstantPool + 3;
+static const int kCodeAgingInstructions = Assembler::kMovInstructions + 3;
 static const int kNoCodeAgeSequenceInstructions =
     ((kNoCodeAgeInstructions >= kCodeAgingInstructions)
          ? kNoCodeAgeInstructions
@@ -273,8 +267,8 @@ Address RelocInfo::call_address() {
   DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   // The pc_ offset of 0 assumes patched return sequence per
-  // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break
-  // slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  // BreakLocation::SetDebugBreakAtReturn(), or debug break
+  // slot per BreakLocation::SetDebugBreakAtSlot().
   return Assembler::target_address_at(pc_, host_);
 }

@@ -308,15 +302,25 @@ Object** RelocInfo::call_object_address() {

 void RelocInfo::WipeOut() {
   DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
-         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_));
-  Assembler::set_target_address_at(pc_, host_, NULL);
+         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+         IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+  if (IsInternalReference(rmode_)) {
+    // Jump table entry
+    Memory::Address_at(pc_) = NULL;
+  } else if (IsInternalReferenceEncoded(rmode_)) {
+    // mov sequence
+    // Currently used only by deserializer, no need to flush.
+    Assembler::set_target_address_at(pc_, host_, NULL, SKIP_ICACHE_FLUSH);
+  } else {
+    Assembler::set_target_address_at(pc_, host_, NULL);
+  }
 }


 bool RelocInfo::IsPatchedReturnSequence() {
   //
   // The patched return sequence is defined by
-  // BreakLocationIterator::SetDebugBreakAtReturn()
+  // BreakLocation::SetDebugBreakAtReturn()
   // FIXED_SEQUENCE

   Instr instr0 = Assembler::instr_at(pc_);
@@ -356,6 +360,9 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
     visitor->VisitCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
+  } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+             mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+    visitor->VisitInternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
   } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
@@ -380,6 +387,9 @@ void RelocInfo::Visit(Heap* heap) {
     StaticVisitor::VisitCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
+  } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+             mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+    StaticVisitor::VisitInternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
   } else if (heap->isolate()->debug()->has_break_points() &&
@@ -459,59 +469,10 @@ Address Assembler::target_address_at(Address pc,
                                      (instr2 & kImm16Mask));
 #endif
   }
-#if V8_OOL_CONSTANT_POOL
-  return Memory::Address_at(target_constant_pool_address_at(pc, constant_pool));
-#else
-  DCHECK(false);
-  return (Address)0;
-#endif
-}
-
-
-#if V8_OOL_CONSTANT_POOL
-bool Assembler::IsConstantPoolLoadStart(Address pc) {
-#if V8_TARGET_ARCH_PPC64
-  if (!IsLi(instr_at(pc))) return false;
-  pc += kInstrSize;
-#endif
-  return GetRA(instr_at(pc)).is(kConstantPoolRegister);
-}
-
-
-bool Assembler::IsConstantPoolLoadEnd(Address pc) {
-#if V8_TARGET_ARCH_PPC64
-  pc -= kInstrSize;
-#endif
-  return IsConstantPoolLoadStart(pc);
-}
-
-
-int Assembler::GetConstantPoolOffset(Address pc) {
-  DCHECK(IsConstantPoolLoadStart(pc));
-  Instr instr = instr_at(pc);
-  int offset = SIGN_EXT_IMM16((instr & kImm16Mask));
-  return offset;
-}
-
-
-void Assembler::SetConstantPoolOffset(Address pc, int offset) {
-  DCHECK(IsConstantPoolLoadStart(pc));
-  DCHECK(is_int16(offset));
-  Instr instr = instr_at(pc);
-  instr &= ~kImm16Mask;
-  instr |= (offset & kImm16Mask);
-  instr_at_put(pc, instr);
-}
-
-
-Address Assembler::target_constant_pool_address_at(
-    Address pc, ConstantPoolArray* constant_pool) {
-  Address addr = reinterpret_cast<Address>(constant_pool);
-  DCHECK(addr);
-  addr += GetConstantPoolOffset(pc);
-  return addr;
+  UNREACHABLE();
+  return NULL;
 }
-#endif


 // This sets the branch destination (which gets loaded at the call address).
@@ -523,6 +484,18 @@ void Assembler::deserialization_set_special_target_at(
   set_target_address_at(instruction_payload, code, target);
 }

+
+void Assembler::deserialization_set_target_internal_reference_at(
+    Address pc, Address target, RelocInfo::Mode mode) {
+  if (RelocInfo::IsInternalReferenceEncoded(mode)) {
+    Code* code = NULL;
+    set_target_address_at(pc, code, target, SKIP_ICACHE_FLUSH);
+  } else {
+    Memory::Address_at(pc) = target;
+  }
+}
+
+
 // This code assumes the FIXED_SEQUENCE of lis/ori
 void Assembler::set_target_address_at(Address pc,
                                       ConstantPoolArray* constant_pool,
@@ -578,14 +551,9 @@ void Assembler::set_target_address_at(Address pc,
       CpuFeatures::FlushICache(p, 2 * kInstrSize);
     }
 #endif
-  } else {
-#if V8_OOL_CONSTANT_POOL
-    Memory::Address_at(target_constant_pool_address_at(pc, constant_pool)) =
-        target;
-#else
-    UNREACHABLE();
-#endif
+    return;
   }
+  UNREACHABLE();
 }
 } }  // namespace v8::internal
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 8bb45e36cc..7778ab1ce1 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -42,7 +42,6 @@
 #include "src/base/cpu.h"
 #include "src/macro-assembler.h"
 #include "src/ppc/assembler-ppc-inl.h"
-#include "src/serialize.h"

 namespace v8 {
 namespace internal {
@@ -142,45 +141,21 @@ const char* DoubleRegister::AllocationIndexToString(int index) {
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo

-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
+                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;


 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially
   // coded.  Being specially coded on PPC means that it is a lis/ori
-  // instruction sequence or is an out of line constant pool entry,
-  // and these are always the case inside code objects.
+  // instruction sequence, and these are always the case inside code
+  // objects.
   return true;
 }


 bool RelocInfo::IsInConstantPool() {
-#if V8_OOL_CONSTANT_POOL
-  return Assembler::IsConstantPoolLoadStart(pc_);
-#else
   return false;
-#endif
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
-  // Patch the code at the current address with the supplied instructions.
-  Instr* pc = reinterpret_cast<Instr*>(pc_);
-  Instr* instr = reinterpret_cast<Instr*>(instructions);
-  for (int i = 0; i < instruction_count; i++) {
-    *(pc + i) = *(instr + i);
-  }
-
-  // Indicate that code has changed.
-  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
-  // Patch the code at the current address with a call to the target.
-  UNIMPLEMENTED();
 }


@@ -226,9 +201,6 @@ MemOperand::MemOperand(Register ra, Register rb) {
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
-#if V8_OOL_CONSTANT_POOL
-      constant_pool_builder_(),
-#endif
       positions_recorder_(this) {
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

@@ -244,11 +216,13 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
   trampoline_emitted_ = FLAG_force_long_branches;
   unbound_labels_count_ = 0;
   ClearRecordedAstId();
+  relocations_.reserve(128);
 }


 void Assembler::GetCode(CodeDesc* desc) {
-  reloc_info_writer.Finish();
+  EmitRelocations();
+
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
@@ -401,32 +375,43 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
 const int kEndOfChain = -4;


+// Dummy opcodes for unbound label mov instructions or jump table entries.
+enum {
+  kUnboundMovLabelOffsetOpcode = 0 << 26,
+  kUnboundAddLabelOffsetOpcode = 1 << 26,
+  kUnboundMovLabelAddrOpcode = 2 << 26,
+  kUnboundJumpTableEntryOpcode = 3 << 26
+};
+
+
 int Assembler::target_at(int pos) {
   Instr instr = instr_at(pos);
   // check which type of branch this is 16 or 26 bit offset
   int opcode = instr & kOpcodeMask;
-  if (BX == opcode) {
-    int imm26 = ((instr & kImm26Mask) << 6) >> 6;
-    imm26 &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
-    if (imm26 == 0) return kEndOfChain;
-    return pos + imm26;
-  } else if (BCX == opcode) {
-    int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
-    imm16 &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
-    if (imm16 == 0) return kEndOfChain;
-    return pos + imm16;
-  } else if ((instr & ~kImm26Mask) == 0) {
-    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
-    if (instr == 0) {
-      return kEndOfChain;
-    } else {
-      int32_t imm26 = SIGN_EXT_IMM26(instr);
-      return (imm26 + pos);
-    }
+  int link;
+  switch (opcode) {
+    case BX:
+      link = SIGN_EXT_IMM26(instr & kImm26Mask);
+      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
+      break;
+    case BCX:
+      link = SIGN_EXT_IMM16((instr & kImm16Mask));
+      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
+      break;
+    case kUnboundMovLabelOffsetOpcode:
+    case kUnboundAddLabelOffsetOpcode:
+    case kUnboundMovLabelAddrOpcode:
+    case kUnboundJumpTableEntryOpcode:
+      link = SIGN_EXT_IMM26(instr & kImm26Mask);
+      link <<= 2;
+      break;
+    default:
+      DCHECK(false);
+      return -1;
   }
-  DCHECK(false);
-  return -1;
+  if (link == 0) return kEndOfChain;
+  return pos + link;
 }


@@ -434,51 +419,74 @@ void Assembler::target_at_put(int pos, int target_pos) {
   Instr instr = instr_at(pos);
   int opcode = instr & kOpcodeMask;

-  // check which type of branch this is 16 or 26 bit offset
-  if (BX == opcode) {
-    int imm26 = target_pos - pos;
-    DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
-    if (imm26 == kInstrSize && !(instr & kLKMask)) {
-      // Branch to next instr without link.
-      instr = ORI;  // nop: ori, 0,0,0
-    } else {
-      instr &= ((~kImm26Mask) | kAAMask | kLKMask);
-      instr |= (imm26 & kImm26Mask);
+  switch (opcode) {
+    case BX: {
+      int imm26 = target_pos - pos;
+      DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
+      if (imm26 == kInstrSize && !(instr & kLKMask)) {
+        // Branch to next instr without link.
+        instr = ORI;  // nop: ori, 0,0,0
+      } else {
+        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+        instr |= (imm26 & kImm26Mask);
+      }
+      instr_at_put(pos, instr);
+      break;
     }
-    instr_at_put(pos, instr);
-    return;
-  } else if (BCX == opcode) {
-    int imm16 = target_pos - pos;
-    DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
-    if (imm16 == kInstrSize && !(instr & kLKMask)) {
-      // Branch to next instr without link.
-      instr = ORI;  // nop: ori, 0,0,0
-    } else {
-      instr &= ((~kImm16Mask) | kAAMask | kLKMask);
-      instr |= (imm16 & kImm16Mask);
+    case BCX: {
+      int imm16 = target_pos - pos;
+      DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+      if (imm16 == kInstrSize && !(instr & kLKMask)) {
+        // Branch to next instr without link.
+        instr = ORI;  // nop: ori, 0,0,0
+      } else {
+        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+        instr |= (imm16 & kImm16Mask);
+      }
+      instr_at_put(pos, instr);
+      break;
     }
-    instr_at_put(pos, instr);
-    return;
-  } else if ((instr & ~kImm26Mask) == 0) {
-    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
-    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
-    // Load the position of the label relative to the generated code object
-    // pointer in a register.
-
-    Register dst = r3;  // we assume r3 for now
-    DCHECK(IsNop(instr_at(pos + kInstrSize)));
-    uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
-    CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
-                        CodePatcher::DONT_FLUSH);
-    int target_hi = static_cast<int>(target) >> 16;
-    int target_lo = static_cast<int>(target) & 0XFFFF;
-
-    patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
-    patcher.masm()->ori(dst, dst, Operand(target_lo));
-    return;
+    case kUnboundMovLabelOffsetOpcode: {
+      // Load the position of the label relative to the generated code object
+      // pointer in a register.
+      Register dst = Register::from_code(instr_at(pos + kInstrSize));
+      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+                          CodePatcher::DONT_FLUSH);
+      patcher.masm()->bitwise_mov32(dst, offset);
+      break;
+    }
+    case kUnboundAddLabelOffsetOpcode: {
+      // dst = base + position + immediate
+      Instr operands = instr_at(pos + kInstrSize);
+      Register dst = Register::from_code((operands >> 21) & 0x1f);
+      Register base = Register::from_code((operands >> 16) & 0x1f);
+      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
+      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+                          CodePatcher::DONT_FLUSH);
+      patcher.masm()->bitwise_add32(dst, base, offset);
+      break;
+    }
+    case kUnboundMovLabelAddrOpcode: {
+      // Load the address of the label in a register.
+      Register dst = Register::from_code(instr_at(pos + kInstrSize));
+      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+                          kMovInstructions, CodePatcher::DONT_FLUSH);
+      // Keep internal references relative until EmitRelocations.
+      patcher.masm()->bitwise_mov(dst, target_pos);
+      break;
+    }
+    case kUnboundJumpTableEntryOpcode: {
+      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+                          kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
+      // Keep internal references relative until EmitRelocations.
+      patcher.masm()->emit_ptr(target_pos);
+      break;
+    }
+    default:
+      DCHECK(false);
+      break;
   }
-
-  DCHECK(false);
 }


@@ -487,13 +495,16 @@ int Assembler::max_reach_from(int pos) {
   int opcode = instr & kOpcodeMask;

   // check which type of branch this is 16 or 26 bit offset
-  if (BX == opcode) {
-    return 26;
-  } else if (BCX == opcode) {
-    return 16;
-  } else if ((instr & ~kImm26Mask) == 0) {
-    // Emitted label constant, not part of a branch (regexp PushBacktrack).
-    return 26;
+  switch (opcode) {
+    case BX:
+      return 26;
+    case BCX:
+      return 16;
+    case kUnboundMovLabelOffsetOpcode:
+    case kUnboundAddLabelOffsetOpcode:
+    case kUnboundMovLabelAddrOpcode:
+    case kUnboundJumpTableEntryOpcode:
+      return 0;  // no limit on reach
   }

   DCHECK(false);
@@ -514,7 +525,7 @@ void Assembler::bind_to(Label* L, int pos) {
       int32_t offset = pos - fixup_pos;
       int maxReach = max_reach_from(fixup_pos);
       next(L);  // call next before overwriting link with target at fixup_pos
-      if (is_intn(offset, maxReach) == false) {
+      if (maxReach && is_intn(offset, maxReach) == false) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry();
          CHECK(trampoline_pos != kInvalidSlotPos);
@@ -636,19 +647,19 @@ int32_t Assembler::get_trampoline_entry() {
 }


-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
-  int target_pos;
+int Assembler::link(Label* L) {
+  int position;
   if (L->is_bound()) {
-    target_pos = L->pos();
+    position = L->pos();
   } else {
     if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
+      position = L->pos();  // L's link
     } else {
       // was: target_pos = kEndOfChain;
-      // However, using branch to self to mark the first reference
+      // However, using self to mark the first reference
       // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
-      target_pos = pc_offset();
+      position = pc_offset();
       if (!trampoline_emitted_) {
         unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
@@ -657,7 +668,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
     L->link_to(pc_offset());
   }

-  return target_pos - pc_offset();
+  return position;
 }


@@ -1478,102 +1489,21 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
 // TOC and static chain are ignored and set to 0.
 void Assembler::function_descriptor() {
 #if ABI_USES_FUNCTION_DESCRIPTORS
+  Label instructions;
   DCHECK(pc_offset() == 0);
-  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
-  emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
+  emit_label_addr(&instructions);
   emit_ptr(0);
   emit_ptr(0);
+  bind(&instructions);
 #endif
 }


-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
-                                          Address code_start,
-                                          ICacheFlushMode icache_flush_mode) {
-  DCHECK(delta || code_start);
-#if ABI_USES_FUNCTION_DESCRIPTORS
-  uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
-  if (fd[1] == 0 && fd[2] == 0) {
-    // Function descriptor
-    if (delta) {
-      fd[0] += delta;
-    } else {
-      fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
-    }
-    return;
-  }
-#endif
-#if V8_OOL_CONSTANT_POOL
-  // mov for LoadConstantPoolPointerRegister
-  ConstantPoolArray* constant_pool = NULL;
-  if (delta) {
-    code_start = target_address_at(pc, constant_pool) + delta;
-  }
-  set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
-#endif
-}
-
-
-int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
-#if ABI_USES_FUNCTION_DESCRIPTORS
-  uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
-  if (fd[1] == 0 && fd[2] == 0) {
-    // Function descriptor
-    SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
-                     "]"
-                     "   function descriptor",
-             fd[0], fd[1], fd[2]);
-    return kPointerSize * 3;
-  }
-#endif
-  return 0;
-}
-#endif
-
-
-int Assembler::instructions_required_for_mov(const Operand& x) const {
-#if V8_OOL_CONSTANT_POOL || DEBUG
-  bool canOptimize =
-      !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
-#endif
-#if V8_OOL_CONSTANT_POOL
-  if (use_constant_pool_for_mov(x, canOptimize)) {
-    // Current usage guarantees that all constant pool references can
-    // use the same sequence.
-    return kMovInstructionsConstantPool;
-  }
-#endif
-  DCHECK(!canOptimize);
-  return kMovInstructionsNoConstantPool;
-}
-
-
-#if V8_OOL_CONSTANT_POOL
-bool Assembler::use_constant_pool_for_mov(const Operand& x,
-                                          bool canOptimize) const {
-  if (!is_ool_constant_pool_available() || is_constant_pool_full()) {
-    // If there is no constant pool available, we must use a mov
-    // immediate sequence.
-    return false;
-  }
-
-  intptr_t value = x.immediate();
-  if (canOptimize && is_int16(value)) {
-    // Prefer a single-instruction load-immediate.
-    return false;
-  }
-
-  return true;
-}
-
-
 void Assembler::EnsureSpaceFor(int space_needed) {
   if (buffer_space() <= (kGap + space_needed)) {
-    GrowBuffer();
+    GrowBuffer(space_needed);
   }
 }
-#endif


 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
@@ -1595,32 +1525,11 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
 // and only use the generic version when we require a fixed sequence
 void Assembler::mov(Register dst, const Operand& src) {
   intptr_t value = src.immediate();
+  bool relocatable = src.must_output_reloc_info(this);
   bool canOptimize;
-  RelocInfo rinfo(pc_, src.rmode_, value, NULL);
-
-  if (src.must_output_reloc_info(this)) {
-    RecordRelocInfo(rinfo);
-  }
-
-  canOptimize = !(src.must_output_reloc_info(this) ||
-                  (is_trampoline_pool_blocked() && !is_int16(value)));
-
-#if V8_OOL_CONSTANT_POOL
-  if (use_constant_pool_for_mov(src, canOptimize)) {
-    DCHECK(is_ool_constant_pool_available());
-    ConstantPoolAddEntry(rinfo);
-#if V8_TARGET_ARCH_PPC64
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-    // We are forced to use 2 instruction sequence since the constant
-    // pool pointer is tagged.
-    li(dst, Operand::Zero());
-    ldx(dst, MemOperand(kConstantPoolRegister, dst));
-#else
-    lwz(dst, MemOperand(kConstantPoolRegister, 0));
-#endif
-    return;
-  }
-#endif
+
+  canOptimize =
+      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

   if (canOptimize) {
     if (is_int16(value)) {
@@ -1658,8 +1567,14 @@ void Assembler::mov(Register dst, const Operand& src) {
   }

   DCHECK(!canOptimize);
+  if (relocatable) {
+    RecordRelocInfo(src.rmode_);
+  }
+  bitwise_mov(dst, value);
+}
+

-  {
+void Assembler::bitwise_mov(Register dst, intptr_t value) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
 #if V8_TARGET_ARCH_PPC64
     int32_t hi_32 = static_cast<int32_t>(value >> 32);
@@ -1679,37 +1594,138 @@ void Assembler::mov(Register dst, const Operand& src) {
     lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
     ori(dst, dst, Operand(lo_word));
 #endif
+}
+
+
+void Assembler::bitwise_mov32(Register dst, int32_t value) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  int hi_word = static_cast<int>(value >> 16);
+  int lo_word = static_cast<int>(value & 0xffff);
+  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+  ori(dst, dst, Operand(lo_word));
+}
+
+
+void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (is_int16(value)) {
+    addi(dst, src, Operand(value));
+    nop();
+  } else {
+    int hi_word = static_cast<int>(value >> 16);
+    int lo_word = static_cast<int>(value & 0xffff);
+    if (lo_word & 0x8000) hi_word++;
+    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
+    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
   }
 }


 void Assembler::mov_label_offset(Register dst, Label* label) {
+  int position = link(label);
   if (label->is_bound()) {
-    int target = label->pos();
-    mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+    // Load the position of the label relative to the generated code object.
+    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
   } else {
-    bool is_linked = label->is_linked();
-    // Emit the link to the label in the code stream followed by extra
-    // nop instructions.
-    DCHECK(dst.is(r3));  // target_at_put assumes r3 for now
-    int link = is_linked ? label->pos() - pc_offset() : 0;
-    label->link_to(pc_offset());
-
-    if (!is_linked && !trampoline_emitted_) {
-      unbound_labels_count_++;
-      next_buffer_check_ -= kTrampolineSlotsSize;
-    }
+    // Encode internal reference to unbound label. We use a dummy opcode
+    // such that it won't collide with any opcode that might appear in the
+    // label's chain.  Encode the destination register in the 2nd instruction.
+    int link = position - pc_offset();
+    DCHECK_EQ(0, link & 3);
+    link >>= 2;
+    DCHECK(is_int26(link));

     // When the label is bound, these instructions will be patched
     // with a 2 instruction mov sequence that will load the
     // destination register with the position of the label from the
     // beginning of the code.
     //
-    // When the label gets bound: target_at extracts the link and
-    // target_at_put patches the instructions.
+    // target_at extracts the link and target_at_put patches the instructions.
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    emit(link);
+    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
+    emit(dst.code());
+  }
+}
+
+
+void Assembler::add_label_offset(Register dst, Register base, Label* label,
+                                 int delta) {
+  int position = link(label);
+  if (label->is_bound()) {
+    // dst = base + position + delta
+    position += delta;
+    bitwise_add32(dst, base, position);
+  } else {
+    // Encode internal reference to unbound label. We use a dummy opcode
+    // such that it won't collide with any opcode that might appear in the
+    // label's chain.  Encode the operands in the 2nd instruction.
+    int link = position - pc_offset();
+    DCHECK_EQ(0, link & 3);
+    link >>= 2;
+    DCHECK(is_int26(link));
+    DCHECK(is_int16(delta));
+
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
+    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
+  }
+}
+
+
+void Assembler::mov_label_addr(Register dst, Label* label) {
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+  int position = link(label);
+  if (label->is_bound()) {
+    // Keep internal references relative until EmitRelocations.
+    bitwise_mov(dst, position);
+  } else {
+    // Encode internal reference to unbound label. We use a dummy opcode
+    // such that it won't collide with any opcode that might appear in the
+    // label's chain.  Encode the destination register in the 2nd instruction.
+    int link = position - pc_offset();
+    DCHECK_EQ(0, link & 3);
+    link >>= 2;
+    DCHECK(is_int26(link));
+
+    // When the label is bound, these instructions will be patched
+    // with a multi-instruction mov sequence that will load the
+    // destination register with the address of the label.
+    //
+    // target_at extracts the link and target_at_put patches the instructions.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
+    emit(dst.code());
+    DCHECK(kMovInstructions >= 2);
+    for (int i = 0; i < kMovInstructions - 2; i++) nop();
+  }
+}
+
+
+void Assembler::emit_label_addr(Label* label) {
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+  int position = link(label);
+  if (label->is_bound()) {
+    // Keep internal references relative until EmitRelocations.
+    emit_ptr(position);
+  } else {
+    // Encode internal reference to unbound label. We use a dummy opcode
+    // such that it won't collide with any opcode that might appear in the
+    // label's chain.
+    int link = position - pc_offset();
+    DCHECK_EQ(0, link & 3);
+    link >>= 2;
+    DCHECK(is_int26(link));
+
+    // When the label is bound, the instruction(s) will be patched
+    // as a jump table entry containing the label address.  target_at extracts
+    // the link and target_at_put patches the instruction(s).
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
+#if V8_TARGET_ARCH_PPC64
     nop();
+#endif
   }
 }


@@ -2172,8 +2188,7 @@ bool Assembler::IsNop(Instr instr, int type) {
 }


-// Debugging.
-void Assembler::GrowBuffer() {
+void Assembler::GrowBuffer(int needed) {
   if (!own_buffer_) FATAL("external code buffer is too small");

   // Compute new buffer size.
@@ -2185,6 +2200,10 @@ void Assembler::GrowBuffer() {
   } else {
     desc.buffer_size = buffer_size_ + 1 * MB;
   }
+  int space = buffer_space() + (desc.buffer_size - buffer_size_);
+  if (space < needed) {
+    desc.buffer_size += needed - space;
+  }
   CHECK_GT(desc.buffer_size, 0);  // no overflow

   // Set up new buffer.
@@ -2209,22 +2228,9 @@ void Assembler::GrowBuffer() {
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);

-// None of our relocation types are pc relative pointing outside the code
-// buffer nor pc absolute pointing inside the code buffer, so there is no need
-// to relocate any emitted relocation entries.
-
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-  // Relocate runtime entries.
-  for (RelocIterator it(desc); !it.done(); it.next()) {
-    RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
-      RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
-    }
-  }
-#if V8_OOL_CONSTANT_POOL
-  constant_pool_builder_.Relocate(pc_delta);
-#endif
-#endif
+  // Nothing else to do here since we keep all internal references and
+  // deferred relocation entries relative to the buffer (until
+  // EmitRelocations).
 }


@@ -2242,20 +2248,27 @@ void Assembler::dd(uint32_t data) {
 }


-void Assembler::emit_ptr(uintptr_t data) {
+void Assembler::emit_ptr(intptr_t data) {
   CheckBuffer();
-  *reinterpret_cast<uintptr_t*>(pc_) = data;
-  pc_ += sizeof(uintptr_t);
+  *reinterpret_cast<intptr_t*>(pc_) = data;
+  pc_ += sizeof(intptr_t);
+}
+
+
+void Assembler::emit_double(double value) {
+  CheckBuffer();
+  *reinterpret_cast<double*>(pc_) = value;
+  pc_ += sizeof(double);
 }


 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
   RecordRelocInfo(rinfo);
 }


-void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
+void Assembler::RecordRelocInfo(const DeferredRelocInfo& rinfo) {
   if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
       rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
@@ -2271,19 +2284,46 @@ void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
       return;
     }
   }
-  DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
   if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
-    RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
-                                     RecordedAstId().ToInt(), NULL);
+    DeferredRelocInfo reloc_info_with_ast_id(rinfo.position(), rinfo.rmode(),
+                                             RecordedAstId().ToInt());
     ClearRecordedAstId();
-    reloc_info_writer.Write(&reloc_info_with_ast_id);
+    relocations_.push_back(reloc_info_with_ast_id);
   } else {
-    reloc_info_writer.Write(&rinfo);
+    relocations_.push_back(rinfo);
   }
 }


+void Assembler::EmitRelocations() {
+  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
+
+  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
+       it != relocations_.end(); it++) {
+    RelocInfo::Mode rmode = it->rmode();
+    Address pc = buffer_ + it->position();
+    Code* code = NULL;
+    RelocInfo rinfo(pc, rmode, it->data(), code);
+
+    // Fix up internal references now that they are guaranteed to be bound.
+    if (RelocInfo::IsInternalReference(rmode)) {
+      // Jump table entry
+      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
+      Memory::Address_at(pc) = buffer_ + pos;
+    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
+      // mov sequence
+      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
+      set_target_address_at(pc, code, buffer_ + pos, SKIP_ICACHE_FLUSH);
+    }
+
+    reloc_info_writer.Write(&rinfo);
+  }
+
+  reloc_info_writer.Finish();
+}
+
+
 void Assembler::BlockTrampolinePoolFor(int instructions) {
   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
 }
@@ -2339,193 +2379,14 @@ void Assembler::CheckTrampolinePool() {


 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
-#if V8_OOL_CONSTANT_POOL
-  return constant_pool_builder_.New(isolate);
-#else
-  // No out-of-line constant pool support.
   DCHECK(!FLAG_enable_ool_constant_pool);
   return isolate->factory()->empty_constant_pool_array();
-#endif
 }


 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
-#if V8_OOL_CONSTANT_POOL
-  constant_pool_builder_.Populate(this, constant_pool);
-#else
-  // No out-of-line constant pool support.
   DCHECK(!FLAG_enable_ool_constant_pool);
-#endif
 }
-
-
-#if V8_OOL_CONSTANT_POOL
-ConstantPoolBuilder::ConstantPoolBuilder()
-    : size_(0),
-      entries_(),
-      current_section_(ConstantPoolArray::SMALL_SECTION) {}
-
-
-bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; }
-
-
-ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
-    RelocInfo::Mode rmode) {
-#if V8_TARGET_ARCH_PPC64
-  // We don't support 32-bit entries at this time.
-  if (!RelocInfo::IsGCRelocMode(rmode)) {
-    return ConstantPoolArray::INT64;
-#else
-  if (rmode == RelocInfo::NONE64) {
-    return ConstantPoolArray::INT64;
-  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
-    return ConstantPoolArray::INT32;
-#endif
-  } else if (RelocInfo::IsCodeTarget(rmode)) {
-    return ConstantPoolArray::CODE_PTR;
-  } else {
-    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
-    return ConstantPoolArray::HEAP_PTR;
-  }
-}
-
-
-ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
-    Assembler* assm, const RelocInfo& rinfo) {
-  RelocInfo::Mode rmode = rinfo.rmode();
-  DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
-         rmode != RelocInfo::STATEMENT_POSITION &&
-         rmode != RelocInfo::CONST_POOL);
-
-  // Try to merge entries which won't be patched.
-  int merged_index = -1;
-  ConstantPoolArray::LayoutSection entry_section = current_section_;
-  if (RelocInfo::IsNone(rmode) ||
-      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
-    size_t i;
-    std::vector<ConstantPoolEntry>::const_iterator it;
-    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
-      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
-        // Merge with found entry.
-        merged_index = i;
-        entry_section = entries_[i].section_;
-        break;
-      }
-    }
-  }
-  DCHECK(entry_section <= current_section_);
-  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
-
-  if (merged_index == -1) {
-    // Not merged, so update the appropriate count.
-    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
-  }
-
-  // Check if we still have room for another entry in the small section
-  // given the limitations of the header's layout fields.
-  if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
-    size_ = ConstantPoolArray::SizeFor(*small_entries());
-    if (!is_uint12(size_)) {
-      current_section_ = ConstantPoolArray::EXTENDED_SECTION;
-    }
-  } else {
-    size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
-                                               *extended_entries());
-  }
-
-  return entry_section;
-}
-
-
-void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
-  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
-       entry != entries_.end(); entry++) {
-    DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
-    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
-  }
-}
-
-
-Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
-  if (IsEmpty()) {
-    return isolate->factory()->empty_constant_pool_array();
-  } else if (extended_entries()->is_empty()) {
-    return isolate->factory()->NewConstantPoolArray(*small_entries());
-  } else {
-    DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
-    return isolate->factory()->NewExtendedConstantPoolArray(
-        *small_entries(), *extended_entries());
-  }
-}
-
-
-void ConstantPoolBuilder::Populate(Assembler* assm,
-                                   ConstantPoolArray* constant_pool) {
-  DCHECK_EQ(extended_entries()->is_empty(),
-            !constant_pool->is_extended_layout());
-  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
-      constant_pool, ConstantPoolArray::SMALL_SECTION)));
-  if (constant_pool->is_extended_layout()) {
-    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
-        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
-  }
-
-  // Set up initial offsets.
-  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
-             [ConstantPoolArray::NUMBER_OF_TYPES];
-  for (int section = 0; section <= constant_pool->final_section(); section++) {
-    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
-                            ? small_entries()->total_count()
-                            : 0;
-    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
-      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
-      if (number_of_entries_[section].count_of(type) != 0) {
-        offsets[section][type] = constant_pool->OffsetOfElementAt(
-            number_of_entries_[section].base_of(type) + section_start);
-      }
-    }
-  }
-
-  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
-       entry != entries_.end(); entry++) {
-    RelocInfo rinfo = entry->rinfo_;
-    RelocInfo::Mode rmode = entry->rinfo_.rmode();
-    ConstantPoolArray::Type type = GetConstantPoolType(rmode);
-
-    // Update constant pool if necessary and get the entry's offset.
-    int offset;
-    if (entry->merged_index_ == -1) {
-      offset = offsets[entry->section_][type];
-      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
-      if (type == ConstantPoolArray::INT64) {
-#if V8_TARGET_ARCH_PPC64
-        constant_pool->set_at_offset(offset, rinfo.data());
-#else
-        constant_pool->set_at_offset(offset, rinfo.data64());
-      } else if (type == ConstantPoolArray::INT32) {
-        constant_pool->set_at_offset(offset,
-                                     static_cast<int32_t>(rinfo.data()));
-#endif
-      } else if (type == ConstantPoolArray::CODE_PTR) {
-        constant_pool->set_at_offset(offset,
-                                     reinterpret_cast<Address>(rinfo.data()));
-      } else {
-        DCHECK(type == ConstantPoolArray::HEAP_PTR);
-        constant_pool->set_at_offset(offset,
-                                     reinterpret_cast<Object*>(rinfo.data()));
-      }
-      offset -= kHeapObjectTag;
-      entry->merged_index_ = offset;  // Stash offset for merged entries.
-    } else {
-      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
-      offset = entries_[entry->merged_index_].merged_index_;
-    }
-
-    // Patch load instruction with correct offset.
-    Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
-  }
-}
-#endif
 }
 }  // namespace v8::internal
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index a3949556f3..bcc2d8f6b6 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -44,8 +44,8 @@
 #include <vector>

 #include "src/assembler.h"
+#include "src/compiler.h"
 #include "src/ppc/constants-ppc.h"
-#include "src/serialize.h"

 #define ABI_USES_FUNCTION_DESCRIPTORS \
   (V8_HOST_ARCH_PPC && (V8_OS_AIX || \
@@ -108,11 +108,7 @@ struct Register {
   static const int kAllocatableLowRangeBegin = 3;
   static const int kAllocatableLowRangeEnd = 10;
   static const int kAllocatableHighRangeBegin = 14;
-#if V8_OOL_CONSTANT_POOL
-  static const int kAllocatableHighRangeEnd = 27;
-#else
   static const int kAllocatableHighRangeEnd = 28;
-#endif
   static const int kAllocatableContext = 30;

   static const int kNumAllocatableLow =
@@ -178,14 +174,18 @@ struct Register {
         "r25",
         "r26",
         "r27",
-#if !V8_OOL_CONSTANT_POOL
         "r28",
-#endif
         "cp",
     };
     return names[index];
   }

+  static const RegList kAllocatable =
+      1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
+      1 << 14 | 1 << 15 | 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 | 1 << 20 |
+      1 << 21 | 1 << 22 | 1 << 23 | 1 << 24 | 1 << 25 | 1 << 26 | 1 << 27 |
+      1 << 28 | 1 << 30;
+
   static Register from_code(int code) {
     Register r = {code};
     return r;
@@ -242,7 +242,7 @@ const int kRegister_r24_Code = 24;
 const int kRegister_r25_Code = 25;
 const int kRegister_r26_Code = 26;
 const int kRegister_r27_Code = 27;
-const int kRegister_r28_Code = 28;  // constant pool pointer
+const int kRegister_r28_Code = 28;
 const int kRegister_r29_Code = 29;  // roots array pointer
 const int kRegister_r30_Code = 30;  // context pointer
 const int kRegister_fp_Code = 31;   // frame pointer
@@ -286,9 +286,6 @@ const Register fp = {kRegister_fp_Code};
 // Give alias names to registers
 const Register cp = {kRegister_r30_Code};  // JavaScript context pointer
 const Register kRootRegister = {kRegister_r29_Code};  // Roots array pointer.
-#if V8_OOL_CONSTANT_POOL
-const Register kConstantPoolRegister = {kRegister_r28_Code};  // Constant pool
-#endif

 // Double word FP register.
 struct DoubleRegister {
@@ -467,13 +464,6 @@ class Operand BASE_EMBEDDED {
   // Return true if this is a register operand.
   INLINE(bool is_reg() const);

-  // For mov.  Return the number of actual instructions required to
-  // load the operand into a register.  This can be anywhere from
-  // one (constant pool small section) to five instructions (full
-  // 64-bit sequence).
-  //
-  // The value returned is only valid as long as no entries are added to the
-  // constant pool between this call and the actual instruction being emitted.
-
   bool must_output_reloc_info(const Assembler* assembler) const;

   inline intptr_t immediate() const {
@@ -527,75 +517,21 @@ class MemOperand BASE_EMBEDDED {
 };


-#if V8_OOL_CONSTANT_POOL
-// Class used to build a constant pool.
-class ConstantPoolBuilder BASE_EMBEDDED {
+class DeferredRelocInfo {
  public:
-  ConstantPoolBuilder();
-  ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
-                                            const RelocInfo& rinfo);
-  void Relocate(intptr_t pc_delta);
-  bool IsEmpty();
-  Handle<ConstantPoolArray> New(Isolate* isolate);
-  void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
-
-  inline ConstantPoolArray::LayoutSection current_section() const {
-    return current_section_;
-  }
-
-  // Rather than increasing the capacity of the ConstantPoolArray's
-  // small section to match the longer (16-bit) reach of PPC's load
-  // instruction (at the expense of a larger header to describe the
-  // layout), the PPC implementation utilizes the extended section to
-  // satisfy that reach.  I.e. all entries (regardless of their
-  // section) are reachable with a single load instruction.
-  //
-  // This implementation does not support an unlimited constant pool
-  // size (which would require a multi-instruction sequence).  [See
-  // ARM commit e27ab337 for a reference on the changes required to
-  // support the longer instruction sequence.]  Note, however, that
-  // going down that path will necessarily generate that longer
-  // sequence for all extended section accesses since the placement of
-  // a given entry within the section is not known at the time of
-  // code generation.
-  //
-  // TODO(mbrandy): Determine whether there is a benefit to supporting
-  // the longer sequence given that nops could be used for those
-  // entries which are reachable with a single instruction.
-  inline bool is_full() const { return !is_int16(size_); }
-
-  inline ConstantPoolArray::NumberOfEntries* number_of_entries(
-      ConstantPoolArray::LayoutSection section) {
-    return &number_of_entries_[section];
-  }
+  DeferredRelocInfo() {}
+  DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
+      : position_(position), rmode_(rmode), data_(data) {}

-  inline ConstantPoolArray::NumberOfEntries* small_entries() {
-    return number_of_entries(ConstantPoolArray::SMALL_SECTION);
-  }
-
-  inline ConstantPoolArray::NumberOfEntries* extended_entries() {
-    return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
-  }
+  int position() const { return position_; }
+  RelocInfo::Mode rmode() const { return rmode_; }
+  intptr_t data() const { return data_; }

  private:
-  struct ConstantPoolEntry {
-    ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
-                      int merged_index)
-        : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
-
-    RelocInfo rinfo_;
-    ConstantPoolArray::LayoutSection section_;
-    int merged_index_;
-  };
-
-  ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
-
-  uint32_t size_;
-  std::vector<ConstantPoolEntry> entries_;
-  ConstantPoolArray::LayoutSection current_section_;
-  ConstantPoolArray::NumberOfEntries number_of_entries_[2];
+  int position_;
+  RelocInfo::Mode rmode_;
+  intptr_t data_;
 };
-#endif


 class Assembler : public AssemblerBase {
@@ -637,6 +573,12 @@ class Assembler : public AssemblerBase {
   // but it may be bound only once.
   void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Links a label at the current pc_offset().  If already bound, returns the
+  // bound position.  If already linked, returns the position of the prior link.
+  // Otherwise, returns the current pc_offset().
+  int link(Label* L);
+
   // Determines if Label is bound and near enough so that a single
   // branch instruction can be used to reach it.
   bool is_near(Label* L, Condition cond);
@@ -644,24 +586,15 @@ class Assembler : public AssemblerBase {
   // Returns the branch offset to the given label from the current code position
   // Links the label to the current position if it is still unbound
   // Manages the jump elimination optimization if the second parameter is true.
-  int branch_offset(Label* L, bool jump_elimination_allowed);
+  int branch_offset(Label* L, bool jump_elimination_allowed) {
+    int position = link(L);
+    return position - pc_offset();
+  }

   // Puts a labels target address at the given position.
   // The high 8 bits are set to zero.
   void label_at_put(Label* L, int at_offset);

-#if V8_OOL_CONSTANT_POOL
-  INLINE(static bool IsConstantPoolLoadStart(Address pc));
-  INLINE(static bool IsConstantPoolLoadEnd(Address pc));
-  INLINE(static int GetConstantPoolOffset(Address pc));
-  INLINE(static void SetConstantPoolOffset(Address pc, int offset));
-
-  // Return the address in the constant pool of the code target address used by
-  // the branch/call instruction at pc, or the object in a mov.
-  INLINE(static Address target_constant_pool_address_at(
-      Address pc, ConstantPoolArray* constant_pool));
-#endif
-
   // Read/Modify the code target address in the branch/call instruction at pc.
   INLINE(static Address target_address_at(Address pc,
                                           ConstantPoolArray* constant_pool));
@@ -669,13 +602,13 @@ class Assembler : public AssemblerBase {
       Address pc, ConstantPoolArray* constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(static Address target_address_at(Address pc, Code* code)) {
-    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    ConstantPoolArray* constant_pool = NULL;
     return target_address_at(pc, constant_pool);
   }
   INLINE(static void set_target_address_at(
       Address pc, Code* code, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    ConstantPoolArray* constant_pool = NULL;
     set_target_address_at(pc, constant_pool, target, icache_flush_mode);
   }

@@ -695,6 +628,11 @@ class Assembler : public AssemblerBase {
   inline static void deserialization_set_special_target_at(
       Address instruction_payload, Code* code, Address target);

+  // This sets the internal reference at the pc.
+  inline static void deserialization_set_target_internal_reference_at(
+      Address pc, Address target,
+      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);

@@ -708,16 +646,11 @@ class Assembler : public AssemblerBase {

   // Number of instructions to load an address via a mov sequence.
 #if V8_TARGET_ARCH_PPC64
-  static const int kMovInstructionsConstantPool = 2;
-  static const int kMovInstructionsNoConstantPool = 5;
+  static const int kMovInstructions = 5;
+  static const int kTaggedLoadInstructions = 2;
 #else
-  static const int kMovInstructionsConstantPool = 1;
-  static const int kMovInstructionsNoConstantPool = 2;
-#endif
-#if V8_OOL_CONSTANT_POOL
-  static const int kMovInstructions = kMovInstructionsConstantPool;
-#else
-  static const int kMovInstructions = kMovInstructionsNoConstantPool;
+  static const int kMovInstructions = 2;
+  static const int kTaggedLoadInstructions = 1;
 #endif

   // Distance between the instruction referring to the address of the call
@@ -747,15 +680,15 @@ class Assembler : public AssemblerBase {
   //   blrl
   static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;

-  // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
+  // This is the length of the BreakLocation::SetDebugBreakAtReturn()
   // code patch FIXED_SEQUENCE
-  static const int kJSReturnSequenceInstructions =
-      kMovInstructionsNoConstantPool + 3;
+  static const int kJSReturnSequenceInstructions = kMovInstructions + 3;
+  static const int kJSReturnSequenceLength =
+      kJSReturnSequenceInstructions * kInstrSize;

   // This is the length of the code sequence from SetDebugBreakAtSlot()
   // FIXED_SEQUENCE
-  static const int kDebugBreakSlotInstructions =
-      kMovInstructionsNoConstantPool + 2;
+  static const int kDebugBreakSlotInstructions = kMovInstructions + 2;
   static const int kDebugBreakSlotLength =
       kDebugBreakSlotInstructions * kInstrSize;

@@ -1076,11 +1009,26 @@ class Assembler : public AssemblerBase {
   void cmplw(Register src1, Register src2, CRegister cr = cr7);

   void mov(Register dst, const Operand& src);
+  void bitwise_mov(Register dst, intptr_t value);
+  void bitwise_mov32(Register dst, int32_t value);
+  void bitwise_add32(Register dst, Register src, int32_t value);

   // Load the position of the label relative to the generated code object
   // pointer in a register.
   void mov_label_offset(Register dst, Label* label);

+  // dst = base + label position + delta
+  void add_label_offset(Register dst, Register base, Label* label,
+                        int delta = 0);
+
+  // Load the address of the label in a register and associate with an
+  // internal reference relocation.
+  void mov_label_addr(Register dst, Label* label);
+
+  // Emit the address of the label (i.e. a jump table entry) and associate with
+  // an internal reference relocation.
+  void emit_label_addr(Label* label);
+
   // Multiply instructions
   void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
            RCBit r = LeaveRC);
@@ -1283,13 +1231,14 @@ class Assembler : public AssemblerBase {

   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const int raw_position);
+  void RecordDeoptReason(const int reason, const SourcePosition position);

   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables.
   void db(uint8_t data);
   void dd(uint32_t data);
-  void emit_ptr(uintptr_t data);
+  void emit_ptr(intptr_t data);
+  void emit_double(double data);

   PositionsRecorder* positions_recorder() { return &positions_recorder_; }

@@ -1335,22 +1284,12 @@ class Assembler : public AssemblerBase {
   void BlockTrampolinePoolFor(int instructions);
   void CheckTrampolinePool();

-  int instructions_required_for_mov(const Operand& x) const;
-
-#if V8_OOL_CONSTANT_POOL
-  // Decide between using the constant pool vs. a mov immediate sequence.
-  bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const;
-
   // The code currently calls CheckBuffer() too often. This has the side
   // effect of randomly growing the buffer in the middle of multi-instruction
   // sequences.
-  // MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation
-  // and multiple instructions.  We cannot grow the buffer until the
-  // relocation and all of the instructions are written.
   //
   // This function allows outside callers to check and grow the buffer
   void EnsureSpaceFor(int space_needed);
-#endif

   // Allocate a constant pool of the correct size for the generated code.
   Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
@@ -1358,23 +1297,7 @@ class Assembler : public AssemblerBase {
   // Generate the constant pool for the generated code.
   void PopulateConstantPool(ConstantPoolArray* constant_pool);

-#if V8_OOL_CONSTANT_POOL
-  bool is_constant_pool_full() const {
-    return constant_pool_builder_.is_full();
-  }
-
-  bool use_extended_constant_pool() const {
-    return constant_pool_builder_.current_section() ==
-           ConstantPoolArray::EXTENDED_SECTION;
-  }
-#endif
-
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-  static void RelocateInternalReference(
-      Address pc, intptr_t delta, Address code_start,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-  static int DecodeInternalReference(Vector<char> buffer, Address pc);
-#endif
+  void EmitRelocations();

 protected:
   // Relocation for a type-recording IC has the AST id added to it.  This
This @@ -1392,13 +1315,7 @@ class Assembler : public AssemblerBase { // Record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); - void RecordRelocInfo(const RelocInfo& rinfo); -#if V8_OOL_CONSTANT_POOL - ConstantPoolArray::LayoutSection ConstantPoolAddEntry( - const RelocInfo& rinfo) { - return constant_pool_builder_.AddEntry(this, rinfo); - } -#endif + void RecordRelocInfo(const DeferredRelocInfo& rinfo); // Block the emission of the trampoline pool before pc_offset. void BlockTrampolinePoolBefore(int pc_offset) { @@ -1407,9 +1324,7 @@ class Assembler : public AssemblerBase { } void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } - void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; } - bool is_trampoline_pool_blocked() const { return trampoline_pool_blocked_nesting_ > 0; } @@ -1439,17 +1354,14 @@ class Assembler : public AssemblerBase { // Each relocation is encoded as a variable size value static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; RelocInfoWriter reloc_info_writer; + std::vector<DeferredRelocInfo> relocations_; // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; -#if V8_OOL_CONSTANT_POOL - ConstantPoolBuilder constant_pool_builder_; -#endif - // Code emission inline void CheckBuffer(); - void GrowBuffer(); + void GrowBuffer(int needed = 0); inline void emit(Instr x); inline void CheckTrampolinePoolQuick(); diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc index ca8704f9dd..c6f0336c4e 100644 --- a/deps/v8/src/ppc/builtins-ppc.cc +++ b/deps/v8/src/ppc/builtins-ppc.cc @@ -125,6 +125,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { __ Assert(eq, kUnexpectedInitialMapForArrayFunction); } + __ mr(r6, r4); // Run the native code for the Array function called as a normal function. // tail call a stub __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); @@ -232,7 +233,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ push(function); // Preserve the function. __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7); { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); __ push(r3); __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); } @@ -252,7 +253,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7); { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); __ push(argument); __ CallRuntime(Runtime::kNewStringWrapper, 1); } @@ -262,7 +263,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { static void CallRuntimePassFunction(MacroAssembler* masm, Runtime::FunctionId function_id) { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Push a copy of the function onto the stack. // Push function as parameter to the runtime call. __ Push(r4, r4); @@ -353,7 +354,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Enter a construct frame. 
{ - FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT); + FrameScope scope(masm, StackFrame::CONSTRUCT); if (create_memento) { __ AssertUndefinedOrAllocationSite(r5, r7); @@ -752,7 +753,7 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) { CHECK(!FLAG_pretenuring_call_new); { - FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT); + FrameScope scope(masm, StackFrame::CONSTRUCT); // Smi-tagged arguments count. __ mr(r7, r3); @@ -760,7 +761,9 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) { // receiver is the hole. __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ Push(r7, ip); + + // smi arguments count, new.target, receiver + __ Push(r7, r6, ip); // Set up pointer to last argument. __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset)); @@ -772,7 +775,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) { // r7: number of arguments (smi-tagged) // cr0: compare against zero of arguments // sp[0]: receiver - // sp[1]: number of arguments (smi-tagged) + // sp[1]: new.target + // sp[2]: number of arguments (smi-tagged) Label loop, no_args; __ beq(&no_args, cr0); __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); @@ -784,6 +788,23 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) { __ bdnz(&loop); __ bind(&no_args); + __ addi(r3, r3, Operand(1)); + + // Handle step in. + Label skip_step_in; + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(masm->isolate()); + __ mov(r5, Operand(debug_step_in_fp)); + __ LoadP(r5, MemOperand(r5)); + __ and_(r0, r5, r5, SetRC); + __ beq(&skip_step_in, cr0); + + __ Push(r3, r4, r4); + __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1); + __ Pop(r3, r4); + + __ bind(&skip_step_in); + // Call the function. // r3: number of arguments // r4: constructor function @@ -896,12 +917,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) { static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Push a copy of the function onto the stack. // Push function as parameter to the runtime call. __ Push(r4, r4); // Whether to compile in a background thread. - __ Push(masm->isolate()->factory()->ToBoolean(concurrent)); + __ LoadRoot( + r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex); + __ push(r0); __ CallRuntime(Runtime::kCompileOptimized, 2); // Restore receiver. @@ -1007,7 +1030,7 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, SaveFPRegsMode save_doubles) { { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Preserve registers across notification, this is important for compiled // stubs that tail call the runtime on deopts passing their parameters in @@ -1036,7 +1059,7 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Pass the function and deoptimization type to the runtime system. 
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type))); __ push(r3); @@ -1084,7 +1107,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Pass function as argument. __ push(r3); __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); @@ -1102,12 +1125,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // <deopt_data> = <code>[#deoptimization_data_offset] __ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset)); -#if V8_OOL_CONSTANT_POOL { - ConstantPoolUnavailableScope constant_pool_unavailable(masm); - __ LoadP(kConstantPoolRegister, - FieldMemOperand(r3, Code::kConstantPoolOffset)); -#endif + __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start // Load the OSR entrypoint offset from the deoptimization data. // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] @@ -1116,17 +1135,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { DeoptimizationInputData::kOsrPcOffsetIndex))); __ SmiUntag(r4); - // Compute the target address = code_obj + header_size + osr_offset - // <entry_addr> = <code_obj> + #header_size + <osr_offset> - __ add(r3, r3, r4); - __ addi(r0, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ mtlr(r0); + // Compute the target address = code start + osr_offset + __ add(r0, r3, r4); // And "return" to the OSR entry point of the function. - __ Ret(); -#if V8_OOL_CONSTANT_POOL + __ mtlr(r0); + __ blr(); } -#endif } @@ -1137,7 +1152,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { __ cmpl(sp, ip); __ bge(&ok); { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); __ CallRuntime(Runtime::kStackGuard, 0); } __ Jump(masm->isolate()->builtins()->OnStackReplacement(), @@ -1228,7 +1243,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { { // Enter an internal frame in order to preserve argument count. - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); __ SmiTag(r3); __ Push(r3, r5); __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); @@ -1351,50 +1366,99 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { } -void Builtins::Generate_FunctionApply(MacroAssembler* masm) { - const int kIndexOffset = - StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize); - const int kLimitOffset = - StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize); - const int kArgsOffset = 2 * kPointerSize; - const int kRecvOffset = 3 * kPointerSize; - const int kFunctionOffset = 4 * kPointerSize; +static void Generate_CheckStackOverflow(MacroAssembler* masm, + const int calleeOffset) { + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(r5, Heap::kRealStackLimitRootIndex); + // Make r5 the space we have left. The stack might already be overflowed + // here which will cause r5 to become negative. + __ sub(r5, sp, r5); + // Check if the arguments will overflow the stack. + __ SmiToPtrArrayOffset(r0, r3); + __ cmp(r5, r0); + __ bgt(&okay); // Signed comparison. + + // Out of stack space. 
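The headroom test emitted above is the standard V8 pattern: subtract the real stack limit from sp (the result may already be negative if the stack has overflowed, hence the signed bgt) and require that the argument area still fits; the out-of-stack path that follows reports the failure through the STACK_OVERFLOW builtin. A minimal C++ restatement of the predicate, with hypothetical inputs standing in for the register values:

  #include <cstdint>

  // Sketch of the check Generate_CheckStackOverflow emits. 'sp' and
  // 'real_stack_limit' model the values in sp/r5, 'argc' the untagged
  // argument count whose byte size lands in r0.
  bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_stack_limit,
                           intptr_t argc) {
    const intptr_t kPointerSize = sizeof(void*);
    intptr_t headroom = sp - real_stack_limit;  // may already be negative
    return headroom > argc * kPointerSize;      // signed, like __ bgt(&okay)
  }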
+ __ LoadP(r4, MemOperand(fp, calleeOffset)); + __ Push(r4, r3); + __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); + + __ bind(&okay); +} + + +static void Generate_PushAppliedArguments(MacroAssembler* masm, + const int argumentsOffset, + const int indexOffset, + const int limitOffset) { + Label entry, loop; + __ LoadP(r3, MemOperand(fp, indexOffset)); + __ b(&entry); + + // Load the current argument from the arguments array and push it to the + // stack. + // r3: current argument index + __ bind(&loop); + __ LoadP(r4, MemOperand(fp, argumentsOffset)); + __ Push(r4, r3); + + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(r3); + + // Use inline caching to access the arguments. + __ LoadP(r3, MemOperand(fp, indexOffset)); + __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0); + __ StoreP(r3, MemOperand(fp, indexOffset)); + + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ LoadP(r4, MemOperand(fp, limitOffset)); + __ cmp(r3, r4); + __ bne(&loop); + + // On exit, the pushed arguments count is in r3, untagged + __ SmiUntag(r3); +} + + +// Used by FunctionApply and ReflectApply +static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) { + const int kFormalParameters = targetIsArgument ? 3 : 2; + const int kStackSize = kFormalParameters + 1; { - FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL); + FrameScope frame_scope(masm, StackFrame::INTERNAL); + const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize; + const int kReceiverOffset = kArgumentsOffset + kPointerSize; + const int kFunctionOffset = kReceiverOffset + kPointerSize; __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function __ push(r3); - __ LoadP(r3, MemOperand(fp, kArgsOffset)); // get the args array + __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array __ push(r3); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(r5, Heap::kRealStackLimitRootIndex); - // Make r5 the space we have left. The stack might already be overflowed - // here which will cause r5 to become negative. - __ sub(r5, sp, r5); - // Check if the arguments will overflow the stack. - __ SmiToPtrArrayOffset(r0, r3); - __ cmp(r5, r0); - __ bgt(&okay); // Signed comparison. - - // Out of stack space. - __ LoadP(r4, MemOperand(fp, kFunctionOffset)); - __ Push(r4, r3); - __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); - // End of stack check. + if (targetIsArgument) { + __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION); + } else { + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + } + + Generate_CheckStackOverflow(masm, kFunctionOffset); // Push current limit and index. - __ bind(&okay); + const int kIndexOffset = + StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize); + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize); __ li(r4, Operand::Zero()); __ Push(r3, r4); // limit and initial index. // Get the receiver. - __ LoadP(r3, MemOperand(fp, kRecvOffset)); + __ LoadP(r3, MemOperand(fp, kReceiverOffset)); // Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver; @@ -1462,43 +1526,18 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ push(r3); // Copy all arguments from the array to the stack. - Label entry, loop; - __ LoadP(r3, MemOperand(fp, kIndexOffset)); - __ b(&entry); - - // Load the current argument from the arguments array and push it to the - // stack. - // r3: current argument index - __ bind(&loop); - __ LoadP(r4, MemOperand(fp, kArgsOffset)); - __ Push(r4, r3); - - // Call the runtime to access the property in the arguments array. - __ CallRuntime(Runtime::kGetProperty, 2); - __ push(r3); - - // Use inline caching to access the arguments. - __ LoadP(r3, MemOperand(fp, kIndexOffset)); - __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0); - __ StoreP(r3, MemOperand(fp, kIndexOffset)); - - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ LoadP(r4, MemOperand(fp, kLimitOffset)); - __ cmp(r3, r4); - __ bne(&loop); + Generate_PushAppliedArguments(masm, kArgumentsOffset, kIndexOffset, + kLimitOffset); // Call the function. Label call_proxy; ParameterCount actual(r3); - __ SmiUntag(r3); __ LoadP(r4, MemOperand(fp, kFunctionOffset)); __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); __ bne(&call_proxy); __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper()); - __ LeaveFrame(StackFrame::INTERNAL, 3 * kPointerSize); + __ LeaveFrame(StackFrame::INTERNAL, kStackSize * kPointerSize); __ blr(); // Call the function proxy. @@ -1512,11 +1551,90 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Tear down the internal frame and remove function, receiver and args. } - __ addi(sp, sp, Operand(3 * kPointerSize)); + __ addi(sp, sp, Operand(kStackSize * kPointerSize)); __ blr(); } +static void Generate_ConstructHelper(MacroAssembler* masm) { + const int kFormalParameters = 3; + const int kStackSize = kFormalParameters + 1; + + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize; + const int kArgumentsOffset = kNewTargetOffset + kPointerSize; + const int kFunctionOffset = kArgumentsOffset + kPointerSize; + + // If newTarget is not supplied, set it to constructor + Label validate_arguments; + __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); + __ bne(&validate_arguments); + __ LoadP(r3, MemOperand(fp, kFunctionOffset)); + __ StoreP(r3, MemOperand(fp, kNewTargetOffset)); + + // Validate arguments + __ bind(&validate_arguments); + __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function + __ push(r3); + __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array + __ push(r3); + __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target + __ push(r3); + __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION); + + Generate_CheckStackOverflow(masm, kFunctionOffset); + + // Push current limit and index. + const int kIndexOffset = + StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize); + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize); + __ li(r4, Operand::Zero()); + __ Push(r3, r4); // limit and initial index. + // Push newTarget and callee functions + __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); + __ push(r3); + __ LoadP(r3, MemOperand(fp, kFunctionOffset)); + __ push(r3); + + // Copy all arguments from the array to the stack. 
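At this point the apply paths and the construct path share one copy loop, emitted by Generate_PushAppliedArguments and invoked just below: read the smi index from its frame slot, fetch one element with Runtime::kGetProperty, push it, bump the index, and stop at the saved limit, leaving the untagged count in r3. Roughly, in plain C++ terms (get_property is a hypothetical stand-in for the runtime call):

  #include <functional>
  #include <vector>

  // Conceptual version of the emitted loop; 'index' and 'limit' model the
  // two frame slots kept at indexOffset/limitOffset.
  int PushAppliedArguments(const std::function<int(int)>& get_property,
                           std::vector<int>* stack, int index, int limit) {
    for (; index != limit; ++index) {
      stack->push_back(get_property(index));  // one runtime call per element
    }
    return index;  // untagged argument count, as left in r3
  }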
+ Generate_PushAppliedArguments(masm, kArgumentsOffset, kIndexOffset, + kLimitOffset); + + // Use undefined feedback vector + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + __ LoadP(r4, MemOperand(fp, kFunctionOffset)); + + // Call the function. + CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + + __ Drop(1); + + // Leave internal frame. + } + __ addi(sp, sp, Operand(kStackSize * kPointerSize)); + __ blr(); +} + + +void Builtins::Generate_FunctionApply(MacroAssembler* masm) { + Generate_ApplyHelper(masm, false); +} + + +void Builtins::Generate_ReflectApply(MacroAssembler* masm) { + Generate_ApplyHelper(masm, true); +} + + +void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { + Generate_ConstructHelper(masm); +} + + static void ArgumentAdaptorStackCheck(MacroAssembler* masm, Label* stack_overflow) { // ----------- S t a t e ------------- @@ -1543,11 +1661,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); __ mflr(r0); __ push(r0); -#if V8_OOL_CONSTANT_POOL - __ Push(fp, kConstantPoolRegister, r7, r4, r3); -#else __ Push(fp, r7, r4, r3); -#endif __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); } diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc index 0226ffbf57..589f6d825e 100644 --- a/deps/v8/src/ppc/code-stubs-ppc.cc +++ b/deps/v8/src/ppc/code-stubs-ppc.cc @@ -12,6 +12,7 @@ #include "src/codegen.h" #include "src/ic/handler-compiler.h" #include "src/ic/ic.h" +#include "src/ic/stub-cache.h" #include "src/isolate.h" #include "src/jsregexp.h" #include "src/regexp-macro-assembler.h" @@ -110,7 +111,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm, int param_count = descriptor.GetEnvironmentParameterCount(); { // Call the runtime system in a fresh internal frame. - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); DCHECK(param_count == 0 || r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1))); // Push arguments @@ -1070,22 +1071,11 @@ void CEntryStub::Generate(MacroAssembler* masm) { // know where the return address is. The CEntryStub is unmovable, so // we can store the address on the stack to be able to find it again and // we never have to restore it, because it will not change. - // Compute the return address in lr to return to after the jump below. Pc is - // already at '+ 8' from the current instruction but return is after three - // instructions so add another 4 to pc to get the return address. - { - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); - Label here; - __ b(&here, SetLK); - __ bind(&here); - __ mflr(r8); - - // Constant used below is dependent on size of Call() macro instructions - __ addi(r0, r8, Operand(20)); - - __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); - __ Call(target); - } + Label after_call; + __ mov_label_addr(r0, &after_call); + __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); + __ Call(target); + __ bind(&after_call); #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS // If return value is on the stack, pop it to registers. 
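The hunk above replaces the old return-address computation in CEntryStub, which branched to the next instruction, read the link register, and added a hand-counted 'Operand(20)' that had to match the exact size of the Call() macro. The new sequence binds a label immediately after the call and materializes its address with mov_label_addr, so the assembler, not a magic byte count, determines the value, and the internal-reference relocation keeps it correct if the code object moves. The difference in intent, sketched with hypothetical helpers:

  #include <cstdint>

  // Fragile: hard-code how many bytes the call sequence occupies.
  uintptr_t ReturnAddressByOffset(uintptr_t pc, int call_sequence_bytes) {
    return pc + call_sequence_bytes;  // breaks if the macro grows or shrinks
  }

  // Robust: use the position the assembler recorded when the label was bound.
  uintptr_t ReturnAddressByLabel(uintptr_t code_start, int bound_label_pos) {
    return code_start + bound_label_pos;  // tracked for us, relocatable
  }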
@@ -1110,13 +1100,13 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ CompareRoot(r3, Heap::kExceptionRootIndex); __ beq(&exception_returned); - ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress, - isolate()); - // Check that there is no pending exception, otherwise we // should have returned the exception sentinel. if (FLAG_debug_code) { Label okay; + ExternalReference pending_exception_address( + Isolate::kPendingExceptionAddress, isolate()); + __ mov(r5, Operand(pending_exception_address)); __ LoadP(r5, MemOperand(r5)); __ CompareRoot(r5, Heap::kTheHoleValueRootIndex); @@ -1137,25 +1127,53 @@ void CEntryStub::Generate(MacroAssembler* masm) { // Handling of exception. __ bind(&exception_returned); - // Retrieve the pending exception. - __ mov(r5, Operand(pending_exception_address)); - __ LoadP(r3, MemOperand(r5)); - - // Clear the pending exception. - __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); - __ StoreP(r6, MemOperand(r5)); - - // Special handling of termination exceptions which are uncatchable - // by javascript code. - Label throw_termination_exception; - __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex); - __ beq(&throw_termination_exception); - - // Handle normal exception. - __ Throw(r3); + ExternalReference pending_handler_context_address( + Isolate::kPendingHandlerContextAddress, isolate()); + ExternalReference pending_handler_code_address( + Isolate::kPendingHandlerCodeAddress, isolate()); + ExternalReference pending_handler_offset_address( + Isolate::kPendingHandlerOffsetAddress, isolate()); + ExternalReference pending_handler_fp_address( + Isolate::kPendingHandlerFPAddress, isolate()); + ExternalReference pending_handler_sp_address( + Isolate::kPendingHandlerSPAddress, isolate()); + + // Ask the runtime for help to determine the handler. This will set r3 to + // contain the current pending exception, don't clobber it. + ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate()); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(3, 0, r3); + __ li(r3, Operand::Zero()); + __ li(r4, Operand::Zero()); + __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); + __ CallCFunction(find_handler, 3); + } - __ bind(&throw_termination_exception); - __ ThrowUncatchable(r3); + // Retrieve the handler context, SP and FP. + __ mov(cp, Operand(pending_handler_context_address)); + __ LoadP(cp, MemOperand(cp)); + __ mov(sp, Operand(pending_handler_sp_address)); + __ LoadP(sp, MemOperand(sp)); + __ mov(fp, Operand(pending_handler_fp_address)); + __ LoadP(fp, MemOperand(fp)); + + // If the handler is a JS frame, restore the context to the frame. Note that + // the context will be set to (cp == 0) for non-JS frames. + Label skip; + __ cmpi(cp, Operand::Zero()); + __ beq(&skip); + __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ bind(&skip); + + // Compute the handler entry address and jump to it. + __ mov(r4, Operand(pending_handler_code_address)); + __ LoadP(r4, MemOperand(r4)); + __ mov(r5, Operand(pending_handler_offset_address)); + __ LoadP(r5, MemOperand(r5)); + __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start + __ add(ip, r4, r5); + __ Jump(ip); } @@ -1195,11 +1213,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // r7: argv __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used. 
__ push(r0); -#if V8_OOL_CONSTANT_POOL - __ mov(kConstantPoolRegister, - Operand(isolate()->factory()->empty_constant_pool_array())); - __ push(kConstantPoolRegister); -#endif int marker = type(); __ LoadSmiLiteral(r0, Smi::FromInt(marker)); __ push(r0); @@ -1236,7 +1249,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { handler_offset_ = handler_entry.pos(); // Caught exception: Store result (exception) in the pending exception // field in the JSEnv and return a failure sentinel. Coming in here the - // fp will be invalid because the PushTryHandler below sets it to 0 to + // fp will be invalid because the PushStackHandler below sets it to 0 to // signal the existence of the JSEntry frame. __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate()))); @@ -1245,11 +1258,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ LoadRoot(r3, Heap::kExceptionRootIndex); __ b(&exit); - // Invoke: Link this frame into the handler chain. There's only one - // handler block in this code object, so its index is 0. + // Invoke: Link this frame into the handler chain. __ bind(&invoke); - // Must preserve r0-r4, r5-r7 are available. (needs update for PPC) - __ PushTryHandler(StackHandler::JS_ENTRY, 0); + // Must preserve r3-r7. + __ PushStackHandler(); // If an exception not caught by another handler occurs, this handler // returns control to the code after the b(&invoke) above, which // restores all kCalleeSaved registers (including cp and fp) to their @@ -1288,7 +1300,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { __ bctrl(); // make the call // Unlink this frame from the handler chain. - __ PopTryHandler(); + __ PopStackHandler(); __ bind(&exit); // r3 holds result // Check if the current stack frame is marked as the outermost JS frame. @@ -1347,14 +1359,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { const Register scratch = r5; Register scratch3 = no_reg; -// delta = mov + unaligned LoadP + cmp + bne -#if V8_TARGET_ARCH_PPC64 - const int32_t kDeltaToLoadBoolResult = - (Assembler::kMovInstructions + 4) * Assembler::kInstrSize; -#else + // delta = mov + tagged LoadP + cmp + bne const int32_t kDeltaToLoadBoolResult = - (Assembler::kMovInstructions + 3) * Assembler::kInstrSize; -#endif + (Assembler::kMovInstructions + Assembler::kTaggedLoadInstructions + 2) * + Assembler::kInstrSize; Label slow, loop, is_instance, is_not_instance, not_js_object; @@ -1514,7 +1522,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); __ Push(r3, r4); __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); } @@ -1584,7 +1592,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) { __ Ret(); StubRuntimeCallHelper call_helper; - char_at_generator.GenerateSlow(masm, call_helper); + char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper); __ bind(&miss); PropertyAccessCompiler::TailCallBuiltin( @@ -1593,6 +1601,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) { void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + CHECK(!has_new_target()); // The displacement is the offset of the last parameter (if any) // relative to the frame pointer. 
const int kDisplacement = @@ -1653,6 +1662,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { // sp[1] : receiver displacement // sp[2] : function + CHECK(!has_new_target()); + // Check if the calling frame is an arguments adaptor frame. Label runtime; __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); @@ -1683,6 +1694,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { // r9 : allocated object (tagged) // r11 : mapped parameter count (tagged) + CHECK(!has_new_target()); + __ LoadP(r4, MemOperand(sp, 0 * kPointerSize)); // r4 = parameter count (tagged) @@ -1965,6 +1978,14 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Patch the arguments.length and the parameters pointer. __ bind(&adaptor_frame); __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); + if (has_new_target()) { + __ CmpSmiLiteral(r4, Smi::FromInt(0), r0); + Label skip_decrement; + __ beq(&skip_decrement); + // Subtract 1 from smi-tagged arguments count. + __ SubSmiLiteral(r4, r4, Smi::FromInt(1), r0); + __ bind(&skip_decrement); + } __ StoreP(r4, MemOperand(sp, 0)); __ SmiToPtrArrayOffset(r6, r4); __ add(r6, r5, r6); @@ -2051,12 +2072,37 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { } +void RestParamAccessStub::GenerateNew(MacroAssembler* masm) { + // Stack layout on entry. + // sp[0] : index of rest parameter + // sp[4] : number of parameters + // sp[8] : receiver displacement + + Label runtime; + __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset)); + __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); + __ bne(&runtime); + + // Patch the arguments.length and the parameters pointer. + __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ StoreP(r4, MemOperand(sp, 1 * kPointerSize)); + __ SmiToPtrArrayOffset(r6, r4); + __ add(r6, r5, r6); + __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); + __ StoreP(r6, MemOperand(sp, 2 * kPointerSize)); + + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewRestParam, 3, 1); +} + + void RegExpExecStub::Generate(MacroAssembler* masm) { // Just jump directly to runtime if native RegExp is not selected at compile // time or if regexp entry in generated code is turned off runtime switch or // at compilation. #ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP // Stack frame on entry. @@ -2323,20 +2369,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ LeaveExitFrame(false, no_reg, true); - // r3: result + // r3: result (int32) // subject: subject string (callee saved) // regexp_data: RegExp data (callee saved) // last_match_info_elements: Last match info elements (callee saved) // Check the result. Label success; - __ cmpi(r3, Operand(1)); + __ cmpwi(r3, Operand(1)); // We expect exactly one result since we force the called regexp to behave // as non-global. __ beq(&success); Label failure; - __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE)); + __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE)); __ beq(&failure); - __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION)); + __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION)); // If not exception it can only be retry. Handle that in the runtime system. 
__ bne(&runtime); // Result must now be exception. If there is no pending exception already a @@ -2350,18 +2396,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ cmp(r3, r4); __ beq(&runtime); - __ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception. - - // Check if the exception is a termination. If so, throw as uncatchable. - __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex); - - Label termination_exception; - __ beq(&termination_exception); - - __ Throw(r3); - - __ bind(&termination_exception); - __ ThrowUncatchable(r3); + // For exception, throw the exception again. + __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1); __ bind(&failure); // For failure and exception return null. @@ -2450,7 +2486,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Do the runtime call to execute the regexp. __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); // Deferred code for string handling. // (6) Not a long external string? If yes, go to (8). @@ -2562,7 +2598,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // Create an AllocationSite if we don't already have it, store it in the // slot. { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Arguments register must be smi-tagged to call out. __ SmiTag(r3); @@ -2648,7 +2684,7 @@ static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) { static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { // Wrap the receiver and patch it back onto the stack. { - FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL); + FrameScope frame_scope(masm, StackFrame::INTERNAL); __ Push(r4, r6); __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); __ pop(r4); @@ -2760,7 +2796,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) { } // Pass function as original constructor. - __ mr(r6, r4); + if (IsSuperConstructorCall()) { + __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2)); + __ addi(r7, r7, Operand(kPointerSize)); + __ LoadPX(r6, MemOperand(sp, r7)); + } else { + __ mr(r6, r4); + } // Jump to the function-specific construct stub. Register jmp_reg = r7; @@ -2823,6 +2865,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { __ bne(&miss); __ mr(r5, r7); + __ mr(r6, r4); ArrayConstructorStub stub(masm->isolate(), arg_count()); __ TailCallStub(&stub); @@ -2959,7 +3002,7 @@ void CallICStub::Generate(MacroAssembler* masm) { // r6 - slot // r4 - function { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); CreateWeakCellStub create_stub(masm->isolate()); __ Push(r4); __ CallStub(&create_stub); @@ -2987,7 +3030,7 @@ void CallICStub::Generate(MacroAssembler* masm) { void CallICStub::GenerateMiss(MacroAssembler* masm) { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Push the function and feedback info. __ Push(r4, r5, r6); @@ -3038,7 +3081,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + MacroAssembler* masm, EmbedMode embed_mode, + const RuntimeCallHelper& call_helper) { __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); // Index is not a smi. 
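The EmbedMode parameter added above is put to work in the hunk below: when StringCharCodeAtGenerator runs as part of a vector-based IC handler, the IC's vector and slot registers are still live across the runtime conversion call, so they are pushed alongside object_ and index_ and restored in mirror order afterwards (index_ itself is consumed by the call). The discipline, modeled with plain ints standing in for register contents:

  #include <stack>

  struct Regs { int vector, slot, object; };

  // Push before the call, pop in mirror order after it; the runtime call
  // consumes the index that was pushed last.
  void RoundTripSlowCall(std::stack<int>& s, Regs& r, int index,
                         bool part_of_ic_handler) {
    if (part_of_ic_handler) { s.push(r.vector); s.push(r.slot); }
    s.push(r.object);
    s.push(index);            // consumed by the conversion runtime call
    s.pop();                  // ...modeled here as an immediate pop
    r.object = s.top(); s.pop();
    if (part_of_ic_handler) {
      r.slot = s.top(); s.pop();
      r.vector = s.top(); s.pop();
    }
  }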
@@ -3047,8 +3091,13 @@ void StringCharCodeAtGenerator::GenerateSlow( __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_, DONT_DO_SMI_CHECK); call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); // Consumed by runtime conversion function. + if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) { + __ Push(VectorLoadICDescriptor::VectorRegister(), + VectorLoadICDescriptor::SlotRegister(), object_, index_); + } else { + // index_ is consumed by runtime conversion function. + __ Push(object_, index_); + } if (index_flags_ == STRING_INDEX_IS_NUMBER) { __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); } else { @@ -3059,7 +3108,12 @@ void StringCharCodeAtGenerator::GenerateSlow( // Save the conversion result before the pop instructions below // have a chance to overwrite it. __ Move(index_, r3); - __ pop(object_); + if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) { + __ Pop(VectorLoadICDescriptor::VectorRegister(), + VectorLoadICDescriptor::SlotRegister(), object_); + } else { + __ pop(object_); + } // Reload the instance type. __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); @@ -3371,7 +3425,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Just jump to runtime to create the sub string. __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); + __ TailCallRuntime(Runtime::kSubStringRT, 3, 1); __ bind(&single_char); // r3: original string @@ -3504,8 +3558,8 @@ void StringHelper::GenerateCompareFlatOneByteStrings( // Conditionally update the result based either on length_delta or // the last comparison performed in the loop above. if (CpuFeatures::IsSupported(ISELECT)) { - __ li(r4, Operand(GREATER)); - __ li(r5, Operand(LESS)); + __ LoadSmiLiteral(r4, Smi::FromInt(GREATER)); + __ LoadSmiLiteral(r5, Smi::FromInt(LESS)); __ isel(eq, r3, r0, r4); __ isel(lt, r3, r5, r3); __ Ret(); @@ -3584,7 +3638,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer.
__ bind(&runtime); - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1); } @@ -3890,7 +3944,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) { if (equality) { __ TailCallRuntime(Runtime::kStringEquals, 2, 1); } else { - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); + __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1); } __ bind(&miss); @@ -3945,7 +3999,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) { ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); __ Push(r4, r3); __ Push(r4, r3); __ LoadSmiLiteral(r0, Smi::FromInt(op())); @@ -4509,15 +4563,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { void LoadICTrampolineStub::Generate(MacroAssembler* masm) { EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister()); - VectorLoadStub stub(isolate(), state()); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + VectorRawLoadStub stub(isolate(), state()); + stub.GenerateForTrampoline(masm); } void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) { EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister()); - VectorKeyedLoadStub stub(isolate()); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + VectorRawKeyedLoadStub stub(isolate()); + stub.GenerateForTrampoline(masm); } @@ -4535,6 +4589,248 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) { } +void VectorRawLoadStub::Generate(MacroAssembler* masm) { + GenerateImpl(masm, false); +} + + +void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) { + GenerateImpl(masm, true); +} + + +static void HandleArrayCases(MacroAssembler* masm, Register receiver, + Register key, Register vector, Register slot, + Register feedback, Register scratch1, + Register scratch2, Register scratch3, + bool is_polymorphic, Label* miss) { + // feedback initially contains the feedback array + Label next_loop, prepare_next; + Label load_smi_map, compare_map; + Label start_polymorphic; + + Register receiver_map = scratch1; + Register cached_map = scratch2; + + // Receiver might not be a heap object. + __ JumpIfSmi(receiver, &load_smi_map); + __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ bind(&compare_map); + __ LoadP(cached_map, + FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0))); + __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); + __ cmp(receiver_map, cached_map); + __ bne(&start_polymorphic); + // found, now call handler. + Register handler = feedback; + __ LoadP(handler, + FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1))); + __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(ip); + + + Register length = scratch3; + __ bind(&start_polymorphic); + __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset)); + if (!is_polymorphic) { + // If the IC could be monomorphic we have to make sure we don't go past the + // end of the feedback array. + __ CmpSmiLiteral(length, Smi::FromInt(2), r0); + __ beq(miss); + } + + Register too_far = length; + Register pointer_reg = feedback; + + // +-----+------+------+-----+-----+ ... ----+ + // | map | len | wm0 | h0 | wm1 | hN | + // +-----+------+------+-----+-----+ ... 
----+ + // 0 1 2 len-1 + // ^ ^ + // | | + // pointer_reg too_far + // aka feedback scratch3 + // also need receiver_map (aka scratch1) + // use cached_map (scratch2) to look in the weak map values. + __ SmiToPtrArrayOffset(r0, length); + __ add(too_far, feedback, r0); + __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ addi(pointer_reg, feedback, + Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag)); + + __ bind(&next_loop); + __ LoadP(cached_map, MemOperand(pointer_reg)); + __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); + __ cmp(receiver_map, cached_map); + __ bne(&prepare_next); + __ LoadP(handler, MemOperand(pointer_reg, kPointerSize)); + __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(ip); + + __ bind(&prepare_next); + __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2)); + __ cmp(pointer_reg, too_far); + __ blt(&next_loop); + + // We exhausted our array of map handler pairs. + __ b(miss); + + __ bind(&load_smi_map); + __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); + __ b(&compare_map); +} + + +static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver, + Register key, Register vector, Register slot, + Register weak_cell, Register scratch, + Label* miss) { + // feedback initially contains the feedback array + Label compare_smi_map; + Register receiver_map = scratch; + Register cached_map = weak_cell; + + // Move the weak map into the weak_cell register. + __ LoadP(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset)); + + // Receiver might not be a heap object. + __ JumpIfSmi(receiver, &compare_smi_map); + __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ cmp(cached_map, receiver_map); + __ bne(miss); + + Register handler = weak_cell; + __ SmiToPtrArrayOffset(r0, slot); + __ add(handler, vector, r0); + __ LoadP(handler, + FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize)); + __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(ip); + + // In microbenchmarks, it made sense to unroll this code so that the call to + // the handler is duplicated for a HeapObject receiver and a Smi receiver. + __ bind(&compare_smi_map); + __ CompareRoot(weak_cell, Heap::kHeapNumberMapRootIndex); + __ bne(miss); + __ SmiToPtrArrayOffset(r0, slot); + __ add(handler, vector, r0); + __ LoadP(handler, + FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize)); + __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(ip); +} + + +void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { + Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4 + Register name = VectorLoadICDescriptor::NameRegister(); // r5 + Register vector = VectorLoadICDescriptor::VectorRegister(); // r6 + Register slot = VectorLoadICDescriptor::SlotRegister(); // r3 + Register feedback = r7; + Register scratch1 = r8; + + __ SmiToPtrArrayOffset(r0, slot); + __ add(feedback, vector, r0); + __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); + + // Is it a weak cell? + Label try_array; + Label not_array, smi_key, key_okay, miss; + __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); + __ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex); + __ bne(&try_array); + HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1, + &miss); + + // Is it a fixed array? 
+ __ bind(&try_array); + __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); + __ bne(¬_array); + HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, r9, + r10, true, &miss); + + __ bind(¬_array); + __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); + __ bne(&miss); + Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::LOAD_IC)); + masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags, + false, receiver, name, feedback, + scratch1, r9, r10); + + __ bind(&miss); + LoadIC::GenerateMiss(masm); +} + + +void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) { + GenerateImpl(masm, false); +} + + +void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) { + GenerateImpl(masm, true); +} + + +void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { + Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4 + Register key = VectorLoadICDescriptor::NameRegister(); // r5 + Register vector = VectorLoadICDescriptor::VectorRegister(); // r6 + Register slot = VectorLoadICDescriptor::SlotRegister(); // r3 + Register feedback = r7; + Register scratch1 = r8; + + __ SmiToPtrArrayOffset(r0, slot); + __ add(feedback, vector, r0); + __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); + + // Is it a weak cell? + Label try_array; + Label not_array, smi_key, key_okay, miss; + __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); + __ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex); + __ bne(&try_array); + HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1, + &miss); + + __ bind(&try_array); + // Is it a fixed array? + __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); + __ bne(¬_array); + + // We have a polymorphic element handler. + Label polymorphic, try_poly_name; + __ bind(&polymorphic); + HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r9, + r10, true, &miss); + + __ bind(¬_array); + // Is it generic? + __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); + __ bne(&try_poly_name); + Handle<Code> megamorphic_stub = + KeyedLoadIC::ChooseMegamorphicStub(masm->isolate()); + __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); + + __ bind(&try_poly_name); + // We might have a name in feedback, and a fixed array in the next slot. + __ cmp(key, feedback); + __ bne(&miss); + // If the name comparison succeeded, we know we have a fixed array with + // at least one map/handler pair. 
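The map/handler scan used by both stubs (HandleArrayCases, shown earlier with its feedback-array layout diagram) is easier to read as plain C++: the feedback fixed array holds (weak-cell-of-map, handler) pairs, walked two slots at a time until the receiver map matches or the end is reached; the code just below reloads that array for the matched-name case and rescans it with is_polymorphic set to false. A sketch with plain pointers standing in for tagged V8 objects:

  #include <cstddef>

  struct Pair { const void* cached_map; const void* handler; };

  // Linear scan over the feedback pairs; nullptr means fall through to miss.
  const void* FindHandler(const void* receiver_map,
                          const Pair* pairs, size_t pair_count) {
    for (size_t i = 0; i < pair_count; ++i) {      // __ bind(&next_loop)
      if (pairs[i].cached_map == receiver_map)     // __ cmp / __ bne
        return pairs[i].handler;                   // __ Jump(ip)
    }
    return nullptr;                                // __ b(miss)
  }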
+ __ SmiToPtrArrayOffset(r0, slot); + __ add(feedback, vector, r0); + __ LoadP(feedback, + FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); + HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r9, + r10, false, &miss); + + __ bind(&miss); + KeyedLoadIC::GenerateMiss(masm); +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { PredictableCodeSizeScope predictable(masm, @@ -4802,6 +5098,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { // -- r3 : argc (only if argument_count() == ANY) // -- r4 : constructor // -- r5 : AllocationSite or undefined + // -- r6 : original constructor // -- sp[0] : return address // -- sp[4] : last argument // ----------------------------------- @@ -4822,6 +5119,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ AssertUndefinedOrAllocationSite(r5, r7); } + Label subclassing; + __ cmp(r6, r4); + __ bne(&subclassing); + Label no_info; // Get the elements kind and case on that. __ CompareRoot(r5, Heap::kUndefinedValueRootIndex); @@ -4835,6 +5136,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ bind(&no_info); GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); + + __ bind(&subclassing); + __ push(r4); + __ push(r6); + + // Adjust argc. + switch (argument_count()) { + case ANY: + case MORE_THAN_ONE: + __ addi(r3, r3, Operand(2)); + break; + case NONE: + __ li(r3, Operand(2)); + break; + case ONE: + __ li(r3, Operand(3)); + break; + } + + __ JumpToExternalReference( + ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate())); } @@ -4997,7 +5319,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, } Label promote_scheduled_exception; - Label exception_handled; Label delete_allocated_handles; Label leave_exit_frame; Label return_value_loaded; @@ -5019,15 +5340,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ cmp(r15, r0); __ bne(&delete_allocated_handles); - // Check if the function scheduled an exception. + // Leave the API exit frame. __ bind(&leave_exit_frame); - __ LoadRoot(r14, Heap::kTheHoleValueRootIndex); - __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate))); - __ LoadP(r15, MemOperand(r15)); - __ cmp(r14, r15); - __ bne(&promote_scheduled_exception); - __ bind(&exception_handled); - bool restore_context = context_restore_operand != NULL; if (restore_context) { __ LoadP(cp, *context_restore_operand); @@ -5039,15 +5353,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ mov(r14, Operand(stack_space)); } __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL); + + // Check if the function scheduled an exception. + __ LoadRoot(r14, Heap::kTheHoleValueRootIndex); + __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate))); + __ LoadP(r15, MemOperand(r15)); + __ cmp(r14, r15); + __ bne(&promote_scheduled_exception); + __ blr(); + // Re-throw by promoting a scheduled exception. __ bind(&promote_scheduled_exception); - { - FrameScope frame(masm, StackFrame::INTERNAL); - __ CallExternalReference( - ExternalReference(Runtime::kPromoteScheduledException, isolate), 0); - } - __ jmp(&exception_handled); + __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); // HandleScope limit has changed. Delete allocated extensions. 
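CallApiFunctionAndReturn is restructured above: the scheduled-exception check now happens after the exit frame has been torn down, so the failure path can simply tail-call Runtime::kPromoteScheduledException instead of building a nested internal frame and jumping back. The new control flow, sketched with hypothetical callables; the deferred-handle cleanup (delete_allocated_handles, bound just below) is unchanged:

  #include <functional>

  // Leave the frame first, then either return or tail-call the promotion
  // runtime, which never returns here.
  void ApiReturnPath(const std::function<void()>& leave_exit_frame,
                     const std::function<bool()>& scheduled_exception_pending,
                     const std::function<void()>& promote_scheduled_exception) {
    leave_exit_frame();                  // __ LeaveExitFrame(...)
    if (!scheduled_exception_pending()) {
      return;                            // __ blr()
    }
    promote_scheduled_exception();       // __ TailCallRuntime(...)
  }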
__ bind(&delete_allocated_handles); diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc index 93d32c2bc6..c0398aebed 100644 --- a/deps/v8/src/ppc/codegen-ppc.cc +++ b/deps/v8/src/ppc/codegen-ppc.cc @@ -646,9 +646,9 @@ void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { - ConstantPoolArray* constant_pool = NULL; - Address target_address = Assembler::target_address_at( - sequence + kCodeAgingTargetDelta, constant_pool); + Code* code = NULL; + Address target_address = + Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code); Code* stub = GetCodeFromTargetAddress(target_address); GetCodeAgeAndParity(stub, age, parity); } diff --git a/deps/v8/src/ppc/debug-ppc.cc b/deps/v8/src/ppc/debug-ppc.cc index 8106853134..f59f6371de 100644 --- a/deps/v8/src/ppc/debug-ppc.cc +++ b/deps/v8/src/ppc/debug-ppc.cc @@ -12,12 +12,7 @@ namespace v8 { namespace internal { -bool BreakLocationIterator::IsDebugBreakAtReturn() { - return Debug::IsDebugBreakAtReturn(rinfo()); -} - - -void BreakLocationIterator::SetDebugBreakAtReturn() { +void BreakLocation::SetDebugBreakAtReturn() { // Patch the code changing the return from JS function sequence from // // LeaveFrame @@ -31,7 +26,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() { // blrl // bkpt // - CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions); + CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions); Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm()); patcher.masm()->mov( v8::internal::r0, @@ -45,29 +40,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() { } -// Restore the JS frame exit code. -void BreakLocationIterator::ClearDebugBreakAtReturn() { - rinfo()->PatchCode(original_rinfo()->pc(), - Assembler::kJSReturnSequenceInstructions); -} - - -// A debug break in the frame exit code is identified by the JS frame exit code -// having been patched with a call instruction. -bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) { - DCHECK(RelocInfo::IsJSReturn(rinfo->rmode())); - return rinfo->IsPatchedReturnSequence(); -} - - -bool BreakLocationIterator::IsDebugBreakAtSlot() { - DCHECK(IsDebugBreakSlot()); - // Check whether the debug break slot instructions have been patched. - return rinfo()->IsPatchedDebugBreakSlotSequence(); -} - - -void BreakLocationIterator::SetDebugBreakAtSlot() { +void BreakLocation::SetDebugBreakAtSlot() { DCHECK(IsDebugBreakSlot()); // Patch the code changing the debug break slot code from // @@ -83,7 +56,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() { // mtlr r0 // blrl // - CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions); + CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions); Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm()); patcher.masm()->mov( v8::internal::r0, @@ -94,13 +67,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() { } -void BreakLocationIterator::ClearDebugBreakAtSlot() { - DCHECK(IsDebugBreakSlot()); - rinfo()->PatchCode(original_rinfo()->pc(), - Assembler::kDebugBreakSlotInstructions); -} - - #define __ ACCESS_MASM(masm) @@ -108,7 +74,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, RegList non_object_regs) { { - FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); + FrameScope scope(masm, StackFrame::INTERNAL); // Load padding words on stack. 
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue)); @@ -317,8 +283,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset - kPointerSize)); - // Pop return address, frame and constant pool pointer (if - // FLAG_enable_ool_constant_pool). + // Pop return address and frame __ LeaveFrame(StackFrame::INTERNAL); // Load context from the function. diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc index ac1504c020..74c88e37a7 100644 --- a/deps/v8/src/ppc/deoptimizer-ppc.cc +++ b/deps/v8/src/ppc/deoptimizer-ppc.cc @@ -142,7 +142,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { // This code tries to be close to ia32 code so that any changes can be // easily ported. -void Deoptimizer::EntryGenerator::Generate() { +void Deoptimizer::TableEntryGenerator::Generate() { GeneratePrologue(); // Unlike on ARM we don't save all the registers, just the useful ones. @@ -172,6 +172,9 @@ void Deoptimizer::EntryGenerator::Generate() { } } + __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); + __ StoreP(fp, MemOperand(ip)); + const int kSavedRegistersAreaSize = (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; @@ -353,13 +356,8 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { -#if V8_OOL_CONSTANT_POOL - DCHECK(FLAG_enable_ool_constant_pool); - SetFrameSlot(offset, value); -#else // No out-of-line constant pool support. UNREACHABLE(); -#endif } diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc index 3472828eee..2486741350 100644 --- a/deps/v8/src/ppc/disasm-ppc.cc +++ b/deps/v8/src/ppc/disasm-ppc.cc @@ -988,6 +988,15 @@ int Decoder::InstructionDecode(byte* instr_ptr) { out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ", instr->InstructionBits()); +#if ABI_USES_FUNCTION_DESCRIPTORS + // The first field will be identified as a jump table entry. We emit the rest + // of the structure as zero, so just skip past them. 
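On ABIs that use function descriptors, a code object begins with a descriptor rather than instructions; only its first word carries relocation info (as a jump table entry), and the rest is emitted as zero. The guard added just below keeps the linear disassembler from misreading those zero words: each is printed as a constant and skipped one instruction slot at a time. A stand-alone sketch of that decoder step:

  #include <cstdint>
  #include <cstdio>

  // One decode step over a raw word stream; kInstrSize is 4 on PPC.
  int DecodeOne(const uint32_t* pc) {
    const int kInstrSize = 4;
    if (*pc == 0) {              // zero-filled descriptor word, not an opcode
      std::printf("constant\n");
      return kInstrSize;         // advance one slot and resume decoding
    }
    // ... normal opcode dispatch would follow here ...
    return kInstrSize;
  }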
+ if (instr->InstructionBits() == 0) { + Format(instr, "constant"); + return Instruction::kInstrSize; + } +#endif + switch (instr->OpcodeValue() << 26) { case TWI: { PrintSoftwareInterrupt(instr->SvcValue()); diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc index 4b52882b0c..00af7c9b01 100644 --- a/deps/v8/src/ppc/frames-ppc.cc +++ b/deps/v8/src/ppc/frames-ppc.cc @@ -21,38 +21,22 @@ namespace internal { Register JavaScriptFrame::fp_register() { return v8::internal::fp; } Register JavaScriptFrame::context_register() { return cp; } Register JavaScriptFrame::constant_pool_pointer_register() { -#if V8_OOL_CONSTANT_POOL - DCHECK(FLAG_enable_ool_constant_pool); - return kConstantPoolRegister; -#else UNREACHABLE(); return no_reg; -#endif } Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } Register StubFailureTrampolineFrame::context_register() { return cp; } Register StubFailureTrampolineFrame::constant_pool_pointer_register() { -#if V8_OOL_CONSTANT_POOL - DCHECK(FLAG_enable_ool_constant_pool); - return kConstantPoolRegister; -#else UNREACHABLE(); return no_reg; -#endif } Object*& ExitFrame::constant_pool_slot() const { -#if V8_OOL_CONSTANT_POOL - DCHECK(FLAG_enable_ool_constant_pool); - const int offset = ExitFrameConstants::kConstantPoolOffset; - return Memory::Object_at(fp() + offset); -#else UNREACHABLE(); return Memory::Object_at(NULL); -#endif } } } // namespace v8::internal diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h index f00fa668a8..40a68b3a37 100644 --- a/deps/v8/src/ppc/frames-ppc.h +++ b/deps/v8/src/ppc/frames-ppc.h @@ -57,15 +57,8 @@ const int kNumCalleeSaved = 18; // Number of registers for which space is reserved in safepoints. Must be a // multiple of 8. -// TODO(regis): Only 8 registers may actually be sufficient. Revisit. const int kNumSafepointRegisters = 32; -// Define the list of registers actually saved at safepoints. -// Note that the number of saved registers may be smaller than the reserved -// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters. -const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved; -const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved; - // The following constants describe the stack frame linkage area as // defined by the ABI. Note that kNumRequiredStackFrameSlots must // satisfy alignment requirements (rounding up if required). @@ -123,13 +116,8 @@ class EntryFrameConstants : public AllStatic { class ExitFrameConstants : public AllStatic { public: -#if V8_OOL_CONSTANT_POOL - static const int kFrameSize = 3 * kPointerSize; - static const int kConstantPoolOffset = -3 * kPointerSize; -#else static const int kFrameSize = 2 * kPointerSize; static const int kConstantPoolOffset = 0; // Not used. 
-#endif static const int kCodeOffset = -2 * kPointerSize; static const int kSPOffset = -1 * kPointerSize; @@ -193,9 +181,6 @@ inline Object* JavaScriptFrame::function_slot_object() const { } -inline void StackHandler::SetFp(Address slot, Address fp) { - Memory::Address_at(slot) = fp; -} } } // namespace v8::internal diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/ppc/full-codegen-ppc.cc index 26503c8cd5..a12f17eba3 100644 --- a/deps/v8/src/ppc/full-codegen-ppc.cc +++ b/deps/v8/src/ppc/full-codegen-ppc.cc @@ -104,7 +104,8 @@ class JumpPatchSite BASE_EMBEDDED { void FullCodeGenerator::Generate() { CompilationInfo* info = info_; handler_table_ = - isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); + Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray( + HandlerTable::LengthForRange(function()->handler_count()), TENURED)); profiling_counter_ = isolate()->factory()->NewCell( Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); @@ -200,9 +201,9 @@ void FullCodeGenerator::Generate() { // Argument to NewContext is the function, which is still in r4. Comment cmnt(masm_, "[ Allocate context"); bool need_write_barrier = true; - if (FLAG_harmony_scoping && info->scope()->is_script_scope()) { + if (info->scope()->is_script_scope()) { __ push(r4); - __ Push(info->scope()->GetScopeInfo()); + __ Push(info->scope()->GetScopeInfo(info->isolate())); __ CallRuntime(Runtime::kNewScriptContext, 2); } else if (heap_slots <= FastNewContextStub::kMaximumSlots) { FastNewContextStub stub(isolate(), heap_slots); @@ -245,6 +246,35 @@ void FullCodeGenerator::Generate() { } } + ArgumentsAccessStub::HasNewTarget has_new_target = + IsSubclassConstructor(info->function()->kind()) + ? ArgumentsAccessStub::HAS_NEW_TARGET + : ArgumentsAccessStub::NO_NEW_TARGET; + + // Possibly allocate RestParameters + int rest_index; + Variable* rest_param = scope()->rest_parameter(&rest_index); + if (rest_param) { + Comment cmnt(masm_, "[ Allocate rest parameter array"); + + int num_parameters = info->scope()->num_parameters(); + int offset = num_parameters * kPointerSize; + if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) { + --num_parameters; + ++rest_index; + } + + __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset)); + __ mov(r5, Operand(Smi::FromInt(num_parameters))); + __ mov(r4, Operand(Smi::FromInt(rest_index))); + __ Push(r6, r5, r4); + + RestParamAccessStub stub(isolate()); + __ CallStub(&stub); + + SetVar(rest_param, r3, r4, r5); + } + Variable* arguments = scope()->arguments(); if (arguments != NULL) { // Function uses arguments object. @@ -267,14 +297,14 @@ void FullCodeGenerator::Generate() { // The stub will rewrite receiver and parameter count if the previous // stack frame was an arguments adapter frame. ArgumentsAccessStub::Type type; - if (is_strict(language_mode())) { + if (is_strict(language_mode()) || !is_simple_parameter_list()) { type = ArgumentsAccessStub::NEW_STRICT; } else if (function()->has_duplicate_parameters()) { type = ArgumentsAccessStub::NEW_SLOPPY_SLOW; } else { type = ArgumentsAccessStub::NEW_SLOPPY_FAST; } - ArgumentsAccessStub stub(isolate(), type); + ArgumentsAccessStub stub(isolate(), type, has_new_target); __ CallStub(&stub); SetVar(arguments, r3, r4, r5); @@ -432,7 +462,11 @@ void FullCodeGenerator::EmitReturnSequence() { // sequence.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize; + int32_t arg_count = info_->scope()->num_parameters() + 1; + if (IsSubclassConstructor(info_->function()->kind())) { + arg_count++; + } + int32_t sp_delta = arg_count * kPointerSize; CodeGenerator::RecordPositions(masm_, function()->end_position() - 1); __ RecordJSReturn(); int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); @@ -440,9 +474,7 @@ void FullCodeGenerator::EmitReturnSequence() { // With 64bit we may need nop() instructions to ensure we have // enough space to SetDebugBreakAtReturn() if (is_int16(sp_delta)) { -#if !V8_OOL_CONSTANT_POOL masm_->nop(); -#endif masm_->nop(); } #endif @@ -1457,7 +1489,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { __ mov(VectorLoadICDescriptor::SlotRegister(), Operand(SmiFromSlot(proxy->VariableFeedbackSlot()))); } - CallLoadIC(CONTEXTUAL); + CallGlobalLoadIC(var->name()); context()->Plug(r3); break; } @@ -2101,7 +2133,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) { // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } __ bind(&l_catch); - handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw" __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter __ Push(load_name, r6, r3); // "throw", iter, except @@ -2112,16 +2143,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) { // re-boxing. __ bind(&l_try); __ pop(r3); // result - __ PushTryHandler(StackHandler::CATCH, expr->index()); - const int handler_size = StackHandlerConstants::kSize; + EnterTryBlock(expr->index(), &l_catch); + const int try_block_size = TryCatch::kElementCount * kPointerSize; __ push(r3); // result __ b(&l_suspend); __ bind(&l_continuation); __ b(&l_resume); __ bind(&l_suspend); - const int generator_object_depth = kPointerSize + handler_size; + const int generator_object_depth = kPointerSize + try_block_size; __ LoadP(r3, MemOperand(sp, generator_object_depth)); __ push(r3); // g + __ Push(Smi::FromInt(expr->index())); // handler-index DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); __ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos())); __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset), @@ -2130,12 +2162,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) { __ mr(r4, cp); __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5, kLRHasBeenSaved, kDontSaveFPRegs); - __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2); __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ pop(r3); // result EmitReturnSequence(); __ bind(&l_resume); // received in r3 - __ PopTryHandler(); + ExitTryBlock(expr->index()); // receiver = iter; f = 'next'; arg = received; __ bind(&l_next); @@ -2256,13 +2288,7 @@ void FullCodeGenerator::EmitGeneratorResume( Label slow_resume; __ bne(&slow_resume, cr0); __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset)); -#if V8_OOL_CONSTANT_POOL { - ConstantPoolUnavailableScope constant_pool_unavailable(masm_); - // Load the new code object's constant pool pointer. 
- __ LoadP(kConstantPoolRegister, - MemOperand(ip, Code::kConstantPoolOffset - Code::kHeaderSize)); -#endif __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset)); __ SmiUntag(r5); __ add(ip, ip, r5); @@ -2272,9 +2298,7 @@ void FullCodeGenerator::EmitGeneratorResume( r0); __ Jump(ip); __ bind(&slow_resume); -#if V8_OOL_CONSTANT_POOL } -#endif } else { __ beq(&call_resume, cr0); } @@ -2538,6 +2562,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) { } __ push(scratch); EmitPropertyKey(property, lit->GetIdForProperty(i)); + + // The static prototype property is read only. We handle the non computed + // property name case in the parser. Since this is the only case where we + // need to check for an own read only property we special case this so we do + // not need to do this for every property. + if (property->is_static() && property->is_computed_name()) { + __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1); + __ push(r3); + } + VisitForStackValue(value); EmitSetHomeObjectIfNeeded(value, 2); @@ -2681,25 +2715,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand()); CallStoreIC(); - } else if (op == Token::INIT_CONST_LEGACY) { - // Const initializers need a write barrier. - DCHECK(!var->IsParameter()); // No const parameters. - if (var->IsLookupSlot()) { - __ push(r3); - __ mov(r3, Operand(var->name())); - __ Push(cp, r3); // Context and name. - __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); - } else { - DCHECK(var->IsStackAllocated() || var->IsContextSlot()); - Label skip; - MemOperand location = VarOperand(var, r4); - __ LoadP(r5, location); - __ CompareRoot(r5, Heap::kTheHoleValueRootIndex); - __ bne(&skip); - EmitStoreToStackLocalOrContextSlot(var, location); - __ bind(&skip); - } - } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. DCHECK(!var->IsLookupSlot()); @@ -2716,6 +2731,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { __ bind(&assign); EmitStoreToStackLocalOrContextSlot(var, location); + } else if (var->mode() == CONST && op != Token::INIT_CONST) { + // Assignment to const variable needs a write barrier. + DCHECK(!var->IsLookupSlot()); + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label const_error; + MemOperand location = VarOperand(var, r4); + __ LoadP(r6, location); + __ CompareRoot(r6, Heap::kTheHoleValueRootIndex); + __ bne(&const_error); + __ mov(r6, Operand(var->name())); + __ push(r6); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + __ bind(&const_error); + __ CallRuntime(Runtime::kThrowConstAssignError, 0); + } else if (!var->is_const_mode() || op == Token::INIT_CONST) { if (var->IsLookupSlot()) { // Assignment to var. @@ -2737,8 +2767,32 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { } EmitStoreToStackLocalOrContextSlot(var, location); } - } else if (IsSignallingAssignmentToConst(var, op, language_mode())) { - __ CallRuntime(Runtime::kThrowConstAssignError, 0); + } else if (op == Token::INIT_CONST_LEGACY) { + // Const initializers need a write barrier. + DCHECK(var->mode() == CONST_LEGACY); + DCHECK(!var->IsParameter()); // No const parameters. + if (var->IsLookupSlot()) { + __ push(r3); + __ mov(r3, Operand(var->name())); + __ Push(cp, r3); // Context and name. 
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3); + } else { + DCHECK(var->IsStackAllocated() || var->IsContextSlot()); + Label skip; + MemOperand location = VarOperand(var, r4); + __ LoadP(r5, location); + __ CompareRoot(r5, Heap::kTheHoleValueRootIndex); + __ bne(&skip); + EmitStoreToStackLocalOrContextSlot(var, location); + __ bind(&skip); + } + + } else { + DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY); + if (is_strict(language_mode())) { + __ CallRuntime(Runtime::kThrowConstAssignError, 0); + } + // Silently ignore store in sloppy mode. } } @@ -2865,7 +2919,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { } // Push undefined as receiver. This is patched in the method prologue if it // is a sloppy mode method. - __ Push(isolate()->factory()->undefined_value()); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + __ push(r0); } else { // Load the function from the receiver. DCHECK(callee->IsProperty()); @@ -2874,8 +2929,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { EmitNamedPropertyLoad(callee->AsProperty()); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); // Push the target function under the receiver. - __ LoadP(ip, MemOperand(sp, 0)); - __ push(ip); + __ LoadP(r0, MemOperand(sp, 0)); + __ push(r0); __ StoreP(r3, MemOperand(sp, kPointerSize)); } @@ -3033,8 +3088,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) { } -void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) { - DCHECK(super_ref != NULL); +void FullCodeGenerator::EmitLoadSuperConstructor() { __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ Push(r3); __ CallRuntime(Runtime::kGetPrototype, 1); @@ -3225,20 +3279,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) { - SuperReference* super_ref = expr->expression()->AsSuperReference(); - EmitLoadSuperConstructor(super_ref); - __ push(result_register()); - - Variable* this_var = super_ref->this_var()->var(); + Variable* new_target_var = scope()->DeclarationScope()->new_target_var(); + GetVar(result_register(), new_target_var); + __ Push(result_register()); - GetVar(r3, this_var); - __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); - Label uninitialized_this; - __ beq(&uninitialized_this); - __ mov(r3, Operand(this_var->name())); - __ push(r3); - __ CallRuntime(Runtime::kThrowReferenceError, 1); - __ bind(&uninitialized_this); + EmitLoadSuperConstructor(); + __ push(result_register()); // Push the arguments ("left-to-right") on the stack. ZoneList<Expression*>* args = expr->arguments(); @@ -3268,12 +3314,24 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) { __ Move(r5, FeedbackVector()); __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot())); - // TODO(dslomov): use a different stub and propagate new.target. 
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET); + CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET); __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + __ Drop(1); + RecordJSReturnSite(expr); + SuperReference* super_ref = expr->expression()->AsSuperReference(); + Variable* this_var = super_ref->this_var()->var(); + GetVar(r4, this_var); + __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); + Label uninitialized_this; + __ beq(&uninitialized_this); + __ mov(r4, Operand(this_var->name())); + __ push(r4); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + __ bind(&uninitialized_this); + EmitVariableAssignment(this_var, Token::INIT_CONST); context()->Plug(r3); } @@ -3742,8 +3800,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); // Check if the constructor in the map is a JS function. - __ LoadP(r3, FieldMemOperand(r3, Map::kConstructorOffset)); - __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE); + Register instance_type = r5; + __ GetMapConstructor(r3, r3, r4, instance_type); + __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE)); __ bne(&non_function_constructor); // r3 now contains the constructor function. Grab the @@ -4036,7 +4095,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { __ b(&done); NopRuntimeCallHelper call_helper; - generator.GenerateSlow(masm_, call_helper); + generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper); __ bind(&done); context()->Plug(result); @@ -4078,7 +4137,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { __ b(&done); NopRuntimeCallHelper call_helper; - generator.GenerateSlow(masm_, call_helper); + generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper); __ bind(&done); context()->Plug(result); @@ -4142,6 +4201,61 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { } +void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) { + Variable* new_target_var = scope()->DeclarationScope()->new_target_var(); + GetVar(result_register(), new_target_var); + __ Push(result_register()); + + EmitLoadSuperConstructor(); + __ mr(r4, result_register()); + __ Push(r4); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, args_set_up, runtime; + __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset)); + __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); + __ beq(&adaptor_frame); + + // default constructor has no arguments, so no adaptor frame means no args. + __ li(r3, Operand::Zero()); + __ b(&args_set_up); + + // Copy arguments from adaptor frame. + { + __ bind(&adaptor_frame); + __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ SmiUntag(r3); + + // Subtract 1 from arguments count, for new.target. + __ subi(r3, r3, Operand(1)); + + // Get arguments pointer in r5. + __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2)); + __ add(r5, r5, r0); + __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); + + Label loop; + __ mtctr(r3); + __ bind(&loop); + // Pre-decrement in order to skip receiver. 
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize)); + __ Push(r6); + __ bdnz(&loop); + } + + __ bind(&args_set_up); + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); + + CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL); + __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + + __ Drop(1); + + context()->Plug(result_register()); +} + + void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { RegExpConstructResultStub stub(isolate()); ZoneList<Expression*>* args = expr->arguments(); @@ -4198,7 +4312,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { __ bind(¬_found); // Call runtime to perform the lookup. __ Push(cache, key); - __ CallRuntime(Runtime::kGetFromCache, 2); + __ CallRuntime(Runtime::kGetFromCacheRT, 2); __ bind(&done); context()->Plug(r3); @@ -4505,18 +4619,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { - if (expr->function() != NULL && - expr->function()->intrinsic_type == Runtime::INLINE) { - Comment cmnt(masm_, "[ InlineRuntimeCall"); - EmitInlineRuntimeCall(expr); - return; - } - - Comment cmnt(masm_, "[ CallRuntime"); ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); if (expr->is_jsruntime()) { + Comment cmnt(masm_, "[ CallRuntime"); // Push the builtins object as the receiver. Register receiver = LoadDescriptor::ReceiverRegister(); __ LoadP(receiver, GlobalObjectOperand()); @@ -4540,7 +4647,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { __ StoreP(r3, MemOperand(sp, kPointerSize)); // Push the arguments ("left-to-right"). - int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { VisitForStackValue(args->at(i)); } @@ -4555,15 +4661,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); context()->DropAndPlug(1, r3); + } else { - // Push the arguments ("left-to-right"). - for (int i = 0; i < arg_count; i++) { - VisitForStackValue(args->at(i)); - } + const Runtime::Function* function = expr->function(); + switch (function->function_id) { +#define CALL_INTRINSIC_GENERATOR(Name) \ + case Runtime::kInline##Name: { \ + Comment cmnt(masm_, "[ Inline" #Name); \ + return Emit##Name(expr); \ + } + FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR) +#undef CALL_INTRINSIC_GENERATOR + default: { + Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic"); + // Push the arguments ("left-to-right"). + for (int i = 0; i < arg_count; i++) { + VisitForStackValue(args->at(i)); + } - // Call the C runtime function. - __ CallRuntime(expr->function(), arg_count); - context()->Plug(r3); + // Call the C runtime function. + __ CallRuntime(expr->function(), arg_count); + context()->Plug(r3); + } + } } } @@ -5206,19 +5326,6 @@ void FullCodeGenerator::EnterFinallyBlock() { __ mov(ip, Operand(pending_message_obj)); __ LoadP(r4, MemOperand(ip)); __ push(r4); - - ExternalReference has_pending_message = - ExternalReference::address_of_has_pending_message(isolate()); - __ mov(ip, Operand(has_pending_message)); - __ lbz(r4, MemOperand(ip)); - __ SmiTag(r4); - __ push(r4); - - ExternalReference pending_message_script = - ExternalReference::address_of_pending_message_script(isolate()); - __ mov(ip, Operand(pending_message_script)); - __ LoadP(r4, MemOperand(ip)); - __ push(r4); } @@ -5226,19 +5333,6 @@ void FullCodeGenerator::ExitFinallyBlock() { DCHECK(!result_register().is(r4)); // Restore pending message from stack. 
__ pop(r4); - ExternalReference pending_message_script = - ExternalReference::address_of_pending_message_script(isolate()); - __ mov(ip, Operand(pending_message_script)); - __ StoreP(r4, MemOperand(ip)); - - __ pop(r4); - __ SmiUntag(r4); - ExternalReference has_pending_message = - ExternalReference::address_of_has_pending_message(isolate()); - __ mov(ip, Operand(has_pending_message)); - __ stb(r4, MemOperand(ip)); - - __ pop(r4); ExternalReference pending_message_obj = ExternalReference::address_of_pending_message_obj(isolate()); __ mov(ip, Operand(pending_message_obj)); @@ -5259,32 +5353,6 @@ void FullCodeGenerator::ExitFinallyBlock() { #undef __ -#define __ ACCESS_MASM(masm()) - -FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( - int* stack_depth, int* context_length) { - // The macros used here must preserve the result register. - - // Because the handler block contains the context of the finally - // code, we can restore it directly from there for the finally code - // rather than iteratively unwinding contexts via their previous - // links. - __ Drop(*stack_depth); // Down to the handler block. - if (*context_length > 0) { - // Restore the context to its dedicated register and the stack. - __ LoadP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); - __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - } - __ PopTryHandler(); - __ b(finally_entry_, SetLK); - - *stack_depth = 0; - *context_length = 0; - return previous_; -} - -#undef __ - void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc, BackEdgeState target_state, diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc index f82d85ded0..01d0150340 100644 --- a/deps/v8/src/ppc/interface-descriptors-ppc.cc +++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc @@ -227,6 +227,12 @@ void InternalArrayConstructorDescriptor::Initialize( } +void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) { + Register registers[] = {cp, r4, r3}; + data->Initialize(arraysize(registers), registers, NULL); +} + + void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) { Register registers[] = {cp, r3}; data->Initialize(arraysize(registers), registers, NULL); diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc index 4d17189c84..f6147c2196 100644 --- a/deps/v8/src/ppc/lithium-codegen-ppc.cc +++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc @@ -7,6 +7,7 @@ #include "src/base/bits.h" #include "src/code-factory.h" #include "src/code-stubs.h" +#include "src/cpu-profiler.h" #include "src/hydrogen-osr.h" #include "src/ic/ic.h" #include "src/ic/stub-cache.h" @@ -109,7 +110,7 @@ bool LCodeGen::GeneratePrologue() { // r4: Callee's JS function. // cp: Callee's context. - // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool) + // pp: Callee's constant pool pointer (if enabled) // fp: Caller's frame pointer. // lr: Caller's pc. // ip: Our own function entry (required by the prologue) @@ -117,7 +118,7 @@ bool LCodeGen::GeneratePrologue() { // Sloppy mode functions and builtins need to replace the receiver with the // global proxy when called as functions (without an explicit receiver // object). 
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) && + if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) && !info_->is_native()) { Label ok; int receiver_offset = info_->scope()->num_parameters() * kPointerSize; @@ -336,49 +337,39 @@ bool LCodeGen::GenerateJumpTable() { if (table_entry->needs_frame) { DCHECK(!info()->saves_caller_doubles()); - if (needs_frame.is_bound()) { - __ b(&needs_frame); - } else { - __ bind(&needs_frame); - Comment(";;; call deopt with frame"); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - DCHECK(info()->IsStub()); - __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB)); - __ PushFixedFrame(ip); - __ addi(fp, sp, - Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); - __ bind(&call_deopt_entry); - // Add the base address to the offset previously loaded in - // entry_offset. - __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); - __ add(ip, entry_offset, ip); - __ Call(ip); - } + Comment(";;; call deopt with frame"); + __ PushFixedFrame(); + __ b(&needs_frame, SetLK); } else { - // The last entry can fall through into `call_deopt_entry`, avoiding a - // branch. - bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); - - if (need_branch) __ b(&call_deopt_entry); + __ b(&call_deopt_entry, SetLK); } + info()->LogDeoptCallPosition(masm()->pc_offset(), + table_entry->deopt_info.inlining_id); } - if (!call_deopt_entry.is_bound()) { - Comment(";;; call deopt"); - __ bind(&call_deopt_entry); + if (needs_frame.is_linked()) { + __ bind(&needs_frame); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + DCHECK(info()->IsStub()); + __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB)); + __ push(ip); + __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); + } - if (info()->saves_caller_doubles()) { - DCHECK(info()->IsStub()); - RestoreCallerDoubles(); - } + Comment(";;; call deopt"); + __ bind(&call_deopt_entry); - // Add the base address to the offset previously loaded in entry_offset. - __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); - __ add(ip, entry_offset, ip); - __ Call(ip); + if (info()->saves_caller_doubles()) { + DCHECK(info()->IsStub()); + RestoreCallerDoubles(); } + + // Add the base address to the offset previously loaded in entry_offset. + __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); + __ add(ip, entry_offset, ip); + __ Jump(ip); } // The deoptimization jump table is the last part of the instruction @@ -812,14 +803,15 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, __ stop("trap_on_deopt", cond, kDefaultStopCode, cr); } - Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(), - instr->Mnemonic(), deopt_reason); + Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason); + DCHECK(info()->IsStub() || frame_is_built_); // Go through jump table if we need to handle condition, build frame, or // restore caller doubles. 
if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) { DeoptComment(deopt_info); __ Call(entry, RelocInfo::RUNTIME_ENTRY); + info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id); } else { Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type, !frame_is_built_); @@ -941,12 +933,6 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, safepoint.DefinePointerRegister(ToRegister(pointer), zone()); } } -#if V8_OOL_CONSTANT_POOL - if (kind & Safepoint::kWithRegisters) { - // Register always contains a pointer to the constant pool. - safepoint.DefinePointerRegister(kConstantPoolRegister, zone()); - } -#endif } @@ -2787,10 +2773,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. - __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset)); + Register instance_type = ip; + __ GetMapConstructor(temp, temp, temp2, instance_type); // Objects with a non-function constructor have class 'Object'. - __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); + __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE)); if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) { __ bne(is_true); } else { @@ -2898,7 +2885,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { Register map = temp; __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); { - // Block constant pool emission to ensure the positions of instructions are + // Block trampoline emission to ensure the positions of instructions are // as expected by the patcher. See InstanceofStub::Generate(). Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); __ bind(deferred->map_check()); // Label for calculating code patching. @@ -2906,10 +2893,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // root array to force relocation to be able to later patch with // the cached map. Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); - __ mov(ip, Operand(Handle<Object>(cell))); - __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); + __ mov(ip, Operand(cell)); + __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); __ cmp(map, ip); - __ bne(&cache_miss); + __ bc_short(ne, &cache_miss); // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch // with true or false. @@ -2957,22 +2944,25 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, LoadContextFromDeferred(instr->context()); __ Move(InstanceofStub::right(), instr->function()); - // Include instructions below in delta: mov + call = mov + (mov + 2) - static const int kAdditionalDelta = 2 * Assembler::kMovInstructions + 2; - int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - if (Assembler::kMovInstructions != 1 && - is_int16(delta * Instruction::kInstrSize)) { - // The following mov will be an li rather than a multi-instruction form - delta -= Assembler::kMovInstructions - 1; - } + Handle<Code> code = stub.GetCode(); + // Include instructions below in delta: bitwise_mov32 + call + int delta = (masm_->InstructionsGeneratedSince(map_check) + 2) * + Instruction::kInstrSize + + masm_->CallSize(code); // r8 is used to communicate the offset to the location of the map check. 
- __ mov(r8, Operand(delta * Instruction::kInstrSize)); + if (is_int16(delta)) { + delta -= Instruction::kInstrSize; + __ li(r8, Operand(delta)); + } else { + __ bitwise_mov32(r8, delta); + } + CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr, + RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); + DCHECK(delta / Instruction::kInstrSize == + masm_->InstructionsGeneratedSince(map_check)); } - CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, - RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - DCHECK(delta == masm_->InstructionsGeneratedSince(map_check)); LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); // Put the result value (r3) into the result register slot and @@ -3052,18 +3042,6 @@ void LCodeGen::DoReturn(LReturn* instr) { } -void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { - Register result = ToRegister(instr->result()); - __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); - __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset)); - if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(result, ip); - DeoptimizeIf(eq, instr, Deoptimizer::kHole); - } -} - - template <class T> void LCodeGen::EmitVectorLoadICRegisters(T* instr) { DCHECK(FLAG_vector_ics); @@ -3093,36 +3071,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); } ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; - Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code(); + Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, + PREMONOMORPHIC).code(); CallCode(ic, RelocInfo::CODE_TARGET, instr); } -void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { - Register value = ToRegister(instr->value()); - Register cell = scratch0(); - - // Load the cell. - __ mov(cell, Operand(instr->hydrogen()->cell().handle())); - - // If the cell we are storing to contains the hole it could have - // been deleted from the property dictionary. In that case, we need - // to update the property details in the property dictionary to mark - // it as no longer deleted. - if (instr->hydrogen()->RequiresHoleCheck()) { - // We use a temp to check the payload (CompareRoot might clobber ip). - Register payload = ToRegister(instr->temp()); - __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset)); - __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(eq, instr, Deoptimizer::kHole); - } - - // Store the value. - __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0); - // Cells are always rescanned, so no write barrier here. 
-} - - void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); @@ -3235,7 +3189,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { if (FLAG_vector_ics) { EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); } - Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code(); + Handle<Code> ic = CodeFactory::LoadICInOptimizedCode( + isolate(), NOT_CONTEXTUAL, + instr->hydrogen()->initialization_state()).code(); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -3590,7 +3546,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); } - Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code(); + Handle<Code> ic = + CodeFactory::KeyedLoadICInOptimizedCode( + isolate(), instr->hydrogen()->initialization_state()).code(); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -4529,7 +4487,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); __ mov(StoreDescriptor::NameRegister(), Operand(instr->name())); - Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode()); + Handle<Code> ic = + StoreIC::initialize_stub(isolate(), instr->language_mode(), + instr->hydrogen()->initialization_state()); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -4793,8 +4753,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); - Handle<Code> ic = - CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code(); + Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( + isolate(), instr->language_mode(), + instr->hydrogen()->initialization_state()).code(); CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -5508,7 +5469,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) { if (isolate()->heap()->InNewSpace(*object)) { Register reg = ToRegister(instr->value()); Handle<Cell> cell = isolate()->factory()->NewCell(object); - __ mov(ip, Operand(Handle<Object>(cell))); + __ mov(ip, Operand(cell)); __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); __ cmp(reg, ip); } else { @@ -5519,6 +5480,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) { void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { + Register temp = ToRegister(instr->temp()); { PushSafepointRegistersScope scope(this); __ push(object); @@ -5526,9 +5488,9 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); RecordSafepointWithRegisters(instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); - __ StoreToSafepointRegisterSlot(r3, scratch0()); + __ StoreToSafepointRegisterSlot(r3, temp); } - __ TestIfSmi(scratch0(), r0); + __ TestIfSmi(temp, r0); DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0); } @@ -5560,17 +5522,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { return; } - Register map_reg = scratch0(); - - LOperand* input = instr->value(); - DCHECK(input->IsRegister()); - Register reg = ToRegister(input); + Register object = ToRegister(instr->value()); + Register map_reg = ToRegister(instr->temp()); - __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); DeferredCheckMaps* deferred = NULL; if 
(instr->hydrogen()->HasMigrationTarget()) { - deferred = new (zone()) DeferredCheckMaps(this, instr, reg); + deferred = new (zone()) DeferredCheckMaps(this, instr, object); __ bind(deferred->check_maps()); } diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc index d54c7ec46a..ec75713480 100644 --- a/deps/v8/src/ppc/lithium-ppc.cc +++ b/deps/v8/src/ppc/lithium-ppc.cc @@ -2029,7 +2029,9 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) { LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps; LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = AssignEnvironment(new (zone()) LCheckMaps(value)); + LOperand* temp = TempRegister(); + LInstruction* result = + AssignEnvironment(new (zone()) LCheckMaps(value, temp)); if (instr->HasMigrationTarget()) { info()->MarkAsDeferredCalling(); result = AssignPointerMap(result); @@ -2096,14 +2098,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { } -LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { - LLoadGlobalCell* result = new (zone()) LLoadGlobalCell; - return instr->RequiresHoleCheck() - ? AssignEnvironment(DefineAsRegister(result)) - : DefineAsRegister(result); -} - - LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LOperand* context = UseFixed(instr->context(), cp); LOperand* global_object = @@ -2118,17 +2112,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { - LOperand* value = UseRegister(instr->value()); - // Use a temp to check the value in the cell in the case where we perform - // a hole check. - return instr->RequiresHoleCheck() - ? 
AssignEnvironment(new (zone()) - LStoreGlobalCell(value, TempRegister())) - : new (zone()) LStoreGlobalCell(value, NULL); -} - - LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LOperand* context = UseRegisterAtStart(instr->value()); LInstruction* result = diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h index ac7b505b98..5dce71cbf5 100644 --- a/deps/v8/src/ppc/lithium-ppc.h +++ b/deps/v8/src/ppc/lithium-ppc.h @@ -100,7 +100,6 @@ class LCodeGen; V(LoadRoot) \ V(LoadFieldByIndex) \ V(LoadFunctionPrototype) \ - V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ V(LoadKeyed) \ V(LoadKeyedGeneric) \ @@ -142,7 +141,6 @@ class LCodeGen; V(StoreCodeEntry) \ V(StoreContextSlot) \ V(StoreFrameContext) \ - V(StoreGlobalCell) \ V(StoreKeyed) \ V(StoreKeyedGeneric) \ V(StoreNamedField) \ @@ -1641,13 +1639,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> { }; -class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> { - public: - DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell") - DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell) -}; - - class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> { public: LLoadGlobalGeneric(LOperand* context, LOperand* global_object, @@ -1669,21 +1660,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> { }; -class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> { - public: - LStoreGlobalCell(LOperand* value, LOperand* temp) { - inputs_[0] = value; - temps_[0] = temp; - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp() { return temps_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") - DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell) -}; - - class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> { public: explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; } @@ -2319,11 +2295,15 @@ class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> { }; -class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> { +class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 1> { public: - explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; } + explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) { + inputs_[0] = value; + temps_[0] = temp; + } LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") DECLARE_HYDROGEN_ACCESSOR(CheckMaps) diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc index 2c9f7aa7a9..2f56d39c92 100644 --- a/deps/v8/src/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/ppc/macro-assembler-ppc.cc @@ -104,15 +104,14 @@ void MacroAssembler::CallJSEntry(Register target) { int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode, Condition cond) { - Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); - return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize; + return (2 + kMovInstructions) * kInstrSize; } int MacroAssembler::CallSizeNotPredictableCodeSize(Address target, RelocInfo::Mode rmode, Condition cond) { - return (2 + kMovInstructionsNoConstantPool) * kInstrSize; + return (2 + kMovInstructions) * kInstrSize; } @@ -274,6 +273,7 @@ void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index, Condition cond) { + DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); 
DCHECK(cond == al); StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0); } @@ -514,40 +514,28 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. void MacroAssembler::PushFixedFrame(Register marker_reg) { mflr(r0); -#if V8_OOL_CONSTANT_POOL - if (marker_reg.is_valid()) { - Push(r0, fp, kConstantPoolRegister, cp, marker_reg); - } else { - Push(r0, fp, kConstantPoolRegister, cp); - } -#else if (marker_reg.is_valid()) { Push(r0, fp, cp, marker_reg); } else { Push(r0, fp, cp); } -#endif } void MacroAssembler::PopFixedFrame(Register marker_reg) { -#if V8_OOL_CONSTANT_POOL - if (marker_reg.is_valid()) { - Pop(r0, fp, kConstantPoolRegister, cp, marker_reg); - } else { - Pop(r0, fp, kConstantPoolRegister, cp); - } -#else if (marker_reg.is_valid()) { Pop(r0, fp, cp, marker_reg); } else { Pop(r0, fp, cp); } -#endif mtlr(r0); } +const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable; +const int MacroAssembler::kNumSafepointSavedRegisters = + Register::kMaxNumAllocatableRegisters; + // Push and pop all registers that can hold pointers. void MacroAssembler::PushSafepointRegisters() { // Safepoints expect a block of kNumSafepointRegisters values on the @@ -664,41 +652,11 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, } -#if V8_OOL_CONSTANT_POOL -void MacroAssembler::LoadConstantPoolPointerRegister( - CodeObjectAccessMethod access_method, int ip_code_entry_delta) { - Register base; - int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize; - if (access_method == CAN_USE_IP) { - base = ip; - constant_pool_offset += ip_code_entry_delta; - } else { - DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE); - base = kConstantPoolRegister; - ConstantPoolUnavailableScope constant_pool_unavailable(this); - - // CheckBuffer() is called too frequently. This will pre-grow - // the buffer if needed to avoid spliting the relocation and instructions - EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize); - - uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset(); - mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE)); - } - LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset)); -} -#endif - - void MacroAssembler::StubPrologue(int prologue_offset) { LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB)); PushFixedFrame(r11); // Adjust FP to point to saved FP. addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); -#if V8_OOL_CONSTANT_POOL - // ip contains prologue address - LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset); - set_ool_constant_pool_available(true); -#endif } @@ -731,28 +689,13 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) { } } } -#if V8_OOL_CONSTANT_POOL - // ip contains prologue address - LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset); - set_ool_constant_pool_available(true); -#endif } void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { - if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) { - PushFixedFrame(); -#if V8_OOL_CONSTANT_POOL - // This path should not rely on ip containing code entry. - LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE); -#endif - LoadSmiLiteral(ip, Smi::FromInt(type)); - push(ip); - } else { - LoadSmiLiteral(ip, Smi::FromInt(type)); - PushFixedFrame(ip); - } + LoadSmiLiteral(ip, Smi::FromInt(type)); + PushFixedFrame(ip); // Adjust FP to point to saved FP. 
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); @@ -762,24 +705,15 @@ void MacroAssembler::EnterFrame(StackFrame::Type type, int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { -#if V8_OOL_CONSTANT_POOL - ConstantPoolUnavailableScope constant_pool_unavailable(this); -#endif // r3: preserved // r4: preserved // r5: preserved // Drop the execution stack down to the frame pointer and restore - // the caller frame pointer, return address and constant pool pointer. + // the caller's state. int frame_ends; LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); -#if V8_OOL_CONSTANT_POOL - const int exitOffset = ExitFrameConstants::kConstantPoolOffset; - const int standardOffset = StandardFrameConstants::kConstantPoolOffset; - const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset); - LoadP(kConstantPoolRegister, MemOperand(fp, offset)); -#endif mtlr(r0); frame_ends = pc_offset(); Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0); @@ -826,10 +760,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { li(r8, Operand::Zero()); StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -#if V8_OOL_CONSTANT_POOL - StoreP(kConstantPoolRegister, - MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); -#endif mov(r8, Operand(CodeObject())); StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); @@ -899,9 +829,6 @@ int MacroAssembler::ActivationFrameAlignment() { void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context, bool argument_count_is_length) { -#if V8_OOL_CONSTANT_POOL - ConstantPoolUnavailableScope constant_pool_unavailable(this); -#endif // Optionally restore all double registers. if (save_doubles) { // Calculate the stack location of the saved doubles and restore them. @@ -1159,165 +1086,32 @@ void MacroAssembler::DebugBreak() { } -void MacroAssembler::PushTryHandler(StackHandler::Kind kind, - int handler_index) { +void MacroAssembler::PushStackHandler() { // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); - - // For the JSEntry handler, we must preserve r1-r7, r0,r8-r15 are available. - // We want the stack to look like - // sp -> NextOffset - // CodeObject - // state - // context - // frame pointer // Link the current handler as the next handler. + // Preserve r3-r7. mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); LoadP(r0, MemOperand(r8)); - StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize)); + push(r0); + // Set this new handler as the current one. StoreP(sp, MemOperand(r8)); - - if (kind == StackHandler::JS_ENTRY) { - li(r8, Operand::Zero()); // NULL frame pointer. - StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset)); - LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context. 
- StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset)); - } else { - // still not sure if fp is right - StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset)); - StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); - } - unsigned state = StackHandler::IndexField::encode(handler_index) | - StackHandler::KindField::encode(kind); - LoadIntLiteral(r8, state); - StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset)); - mov(r8, Operand(CodeObject())); - StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset)); } -void MacroAssembler::PopTryHandler() { +void MacroAssembler::PopStackHandler() { + STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + pop(r4); mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); - addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); StoreP(r4, MemOperand(ip)); } -// PPC - make use of ip as a temporary register -void MacroAssembler::JumpToHandlerEntry() { -// Compute the handler entry address and jump to it. The handler table is -// a fixed array of (smi-tagged) code offsets. -// r3 = exception, r4 = code object, r5 = state. -#if V8_OOL_CONSTANT_POOL - ConstantPoolUnavailableScope constant_pool_unavailable(this); - LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset)); -#endif - LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table. - addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index. - slwi(ip, r5, Operand(kPointerSizeLog2)); - add(ip, r6, ip); - LoadP(r5, MemOperand(ip)); // Smi-tagged offset. - addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. - SmiUntag(ip, r5); - add(r0, r4, ip); - mtctr(r0); - bctr(); -} - - -void MacroAssembler::Throw(Register value) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); - Label skip; - - // The exception is expected in r3. - if (!value.is(r3)) { - mr(r3, value); - } - // Drop the stack pointer to the top of the top handler. - mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); - LoadP(sp, MemOperand(r6)); - // Restore the next handler. - pop(r5); - StoreP(r5, MemOperand(r6)); - - // Get the code object (r4) and state (r5). Restore the context and frame - // pointer. - pop(r4); - pop(r5); - pop(cp); - pop(fp); - - // If the handler is a JS frame, restore the context to the frame. - // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp - // or cp. - cmpi(cp, Operand::Zero()); - beq(&skip); - StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - bind(&skip); - - JumpToHandlerEntry(); -} - - -void MacroAssembler::ThrowUncatchable(Register value) { - // Adjust this code if not the case. 
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); - - // The exception is expected in r3. - if (!value.is(r3)) { - mr(r3, value); - } - // Drop the stack pointer to the top of the top stack handler. - mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); - LoadP(sp, MemOperand(r6)); - - // Unwind the handlers until the ENTRY handler is found. - Label fetch_next, check_kind; - b(&check_kind); - bind(&fetch_next); - LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); - - bind(&check_kind); - STATIC_ASSERT(StackHandler::JS_ENTRY == 0); - LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset)); - andi(r0, r5, Operand(StackHandler::KindField::kMask)); - bne(&fetch_next, cr0); - - // Set the top handler address to next handler past the top ENTRY handler. - pop(r5); - StoreP(r5, MemOperand(r6)); - // Get the code object (r4) and state (r5). Clear the context and frame - // pointer (0 was saved in the handler). - pop(r4); - pop(r5); - pop(cp); - pop(fp); - - JumpToHandlerEntry(); -} - - void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { Label same_contexts; @@ -2107,6 +1901,20 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell, } +void MacroAssembler::GetMapConstructor(Register result, Register map, + Register temp, Register temp2) { + Label done, loop; + LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset)); + bind(&loop); + JumpIfSmi(result, &done); + CompareObjectType(result, temp, temp2, MAP_TYPE); + bne(&done); + LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset)); + b(&loop); + bind(&done); +} + + void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, Register scratch, Label* miss, bool miss_on_bound_function) { @@ -2163,7 +1971,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, // Non-instance prototype: Fetch prototype from constructor field // in initial map. bind(&non_instance); - LoadP(result, FieldMemOperand(result, Map::kConstructorOffset)); + GetMapConstructor(result, result, scratch, ip); } // All done. @@ -3370,25 +3178,6 @@ void MacroAssembler::SetRelocatedValue(Register location, Register scratch, Register new_value) { lwz(scratch, MemOperand(location)); -#if V8_OOL_CONSTANT_POOL - if (emit_debug_code()) { -// Check that the instruction sequence is a load from the constant pool -#if V8_TARGET_ARCH_PPC64 - And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16))); - Cmpi(scratch, Operand(ADDI), r0); - Check(eq, kTheInstructionShouldBeALi); - lwz(scratch, MemOperand(location, kInstrSize)); -#endif - ExtractBitMask(scratch, scratch, 0x1f * B16); - cmpi(scratch, Operand(kConstantPoolRegister.code())); - Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); - // Scratch was clobbered. Restore it. - lwz(scratch, MemOperand(location)); - } - // Get the address of the constant and patch it. 
- andi(scratch, scratch, Operand(kImm16Mask)); - StorePX(new_value, MemOperand(kConstantPoolRegister, scratch)); -#else // This code assumes a FIXED_SEQUENCE for lis/ori // At this point scratch is a lis instruction. @@ -3465,7 +3254,6 @@ void MacroAssembler::SetRelocatedValue(Register location, Register scratch, #else FlushICache(location, 2 * kInstrSize, scratch); #endif -#endif } @@ -3473,24 +3261,6 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result, Register scratch) { lwz(result, MemOperand(location)); -#if V8_OOL_CONSTANT_POOL - if (emit_debug_code()) { -// Check that the instruction sequence is a load from the constant pool -#if V8_TARGET_ARCH_PPC64 - And(result, result, Operand(kOpcodeMask | (0x1f * B16))); - Cmpi(result, Operand(ADDI), r0); - Check(eq, kTheInstructionShouldBeALi); - lwz(result, MemOperand(location, kInstrSize)); -#endif - ExtractBitMask(result, result, 0x1f * B16); - cmpi(result, Operand(kConstantPoolRegister.code())); - Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); - lwz(result, MemOperand(location)); - } - // Get the address of the constant and retrieve it. - andi(result, result, Operand(kImm16Mask)); - LoadPX(result, MemOperand(kConstantPoolRegister, result)); -#else // This code assumes a FIXED_SEQUENCE for lis/ori if (emit_debug_code()) { And(result, result, Operand(kOpcodeMask | (0x1f * B16))); @@ -3543,7 +3313,6 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result, sldi(result, result, Operand(16)); rldimi(result, scratch, 0, 48); #endif -#endif } @@ -3929,23 +3698,6 @@ void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) { void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value, Register scratch) { -#if V8_OOL_CONSTANT_POOL - // TODO(mbrandy): enable extended constant pool usage for doubles. - // See ARM commit e27ab337 for a reference. - if (is_ool_constant_pool_available() && !is_constant_pool_full()) { - RelocInfo rinfo(pc_, value); - ConstantPoolAddEntry(rinfo); -#if V8_TARGET_ARCH_PPC64 - // We use 2 instruction sequence here for consistency with mov. 
- li(scratch, Operand::Zero()); - lfdx(result, MemOperand(kConstantPoolRegister, scratch)); -#else - lfd(result, MemOperand(kConstantPoolRegister, 0)); -#endif - return; - } -#endif - // avoid gcc strict aliasing error using union cast union { double dval; @@ -4081,6 +3833,46 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, #endif +void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src, + Register scratch) { +#if V8_TARGET_ARCH_PPC64 + if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { + mffprd(scratch, dst); + rldimi(scratch, src, 0, 32); + mtfprd(dst, scratch); + return; + } +#endif + + subi(sp, sp, Operand(kDoubleSize)); + stfd(dst, MemOperand(sp)); + stw(src, MemOperand(sp, Register::kMantissaOffset)); + nop(GROUP_ENDING_NOP); // LHS/RAW optimization + lfd(dst, MemOperand(sp)); + addi(sp, sp, Operand(kDoubleSize)); +} + + +void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, + Register scratch) { +#if V8_TARGET_ARCH_PPC64 + if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { + mffprd(scratch, dst); + rldimi(scratch, src, 32, 0); + mtfprd(dst, scratch); + return; + } +#endif + + subi(sp, sp, Operand(kDoubleSize)); + stfd(dst, MemOperand(sp)); + stw(src, MemOperand(sp, Register::kExponentOffset)); + nop(GROUP_ENDING_NOP); // LHS/RAW optimization + lfd(dst, MemOperand(sp)); + addi(sp, sp, Operand(kDoubleSize)); +} + + void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h index 146489d131..04e9bd85bd 100644 --- a/deps/v8/src/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/ppc/macro-assembler-ppc.h @@ -102,9 +102,7 @@ class MacroAssembler : public Assembler { MacroAssembler(Isolate* isolate, void* buffer, int size); - // Returns the size of a call in instructions. Note, the value returned is - // only valid as long as no entries are added to the constant pool between - // checking the call size and emitting the actual call. + // Returns the size of a call in instructions. static int CallSize(Register target); int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); static int CallSizeNotPredictableCodeSize(Address target, @@ -379,8 +377,9 @@ class MacroAssembler : public Assembler { void Prologue(bool code_pre_aging, int prologue_offset = 0); // Enter exit frame. - // stack_space - extra stack space, used for alignment before call to C. - void EnterExitFrame(bool save_doubles, int stack_space = 0); + // stack_space - extra stack space, used for parameters before call to C. + // At least one slot (for the return address) should be provided. + void EnterExitFrame(bool save_doubles, int stack_space = 1); // Leave the current exit frame. Expects the return value in r0. 
// Expect the number of values, pushed prior to the exit frame, to @@ -464,6 +463,8 @@ class MacroAssembler : public Assembler { void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi, Register src_lo, Register scratch); #endif + void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch); + void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch); void MovDoubleLowToInt(Register dst, DoubleRegister src); void MovDoubleHighToInt(Register dst, DoubleRegister src); void MovDoubleToInt64( @@ -543,19 +544,12 @@ class MacroAssembler : public Assembler { // --------------------------------------------------------------------------- // Exception handling - // Push a new try handler and link into try handler chain. - void PushTryHandler(StackHandler::Kind kind, int handler_index); + // Push a new stack handler and link into stack handler chain. + void PushStackHandler(); - // Unlink the stack handler on top of the stack from the try handler chain. + // Unlink the stack handler on top of the stack from the stack handler chain. // Must preserve the result register. - void PopTryHandler(); - - // Passes thrown value to the handler of top of the try handler chain. - void Throw(Register value); - - // Propagates an uncatchable exception to the top of the current JS stack's - // handler chain. - void ThrowUncatchable(Register value); + void PopStackHandler(); // --------------------------------------------------------------------------- // Inline caching support @@ -684,6 +678,11 @@ class MacroAssembler : public Assembler { // --------------------------------------------------------------------------- // Support functions. + // Machine code version of Map::GetConstructor(). + // |temp| holds |result|'s map when done, and |temp2| its instance type. + void GetMapConstructor(Register result, Register map, Register temp, + Register temp2); + // Try to get function prototype of a function and puts the value in // the result register. Checks that the function really is a // function and jumps to the miss label if the fast checks fail. The @@ -1361,7 +1360,7 @@ class MacroAssembler : public Assembler { // --------------------------------------------------------------------------- // Patching helpers. - // Retrieve/patch the relocated value (lis/ori pair or constant pool load). + // Retrieve/patch the relocated value (lis/ori pair). void GetRelocatedValue(Register location, Register result, Register scratch); void SetRelocatedValue(Register location, Register scratch, Register new_value); @@ -1481,22 +1480,14 @@ class MacroAssembler : public Assembler { inline void GetMarkBits(Register addr_reg, Register bitmap_reg, Register mask_reg); - // Helper for throwing exceptions. Compute a handler address and jump to - // it. See the implementation for register usage. - void JumpToHandlerEntry(); + static const RegList kSafepointSavedRegisters; + static const int kNumSafepointSavedRegisters; // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); MemOperand SafepointRegistersAndDoublesSlot(Register reg); -#if V8_OOL_CONSTANT_POOL - // Loads the constant pool pointer (kConstantPoolRegister). 
- enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE }; - void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method, - int ip_code_entry_delta = 0); -#endif - bool generating_stub_; bool has_frame_; // This handle will be patched with the code object on installation. diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc index 0bb2da05ff..9b29004f3d 100644 --- a/deps/v8/src/ppc/simulator-ppc.cc +++ b/deps/v8/src/ppc/simulator-ppc.cc @@ -11,6 +11,7 @@ #if V8_TARGET_ARCH_PPC #include "src/assembler.h" +#include "src/base/bits.h" #include "src/codegen.h" #include "src/disasm.h" #include "src/ppc/constants-ppc.h" @@ -2998,8 +2999,7 @@ void Simulator::ExecuteExt5(Instruction* instr) { int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5)); DCHECK(sh >= 0 && sh <= 63); DCHECK(mb >= 0 && mb <= 63); - // rotate left - uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh)); + uintptr_t result = base::bits::RotateLeft64(rs_val, sh); uintptr_t mask = 0xffffffffffffffff >> mb; result &= mask; set_register(ra, result); @@ -3016,8 +3016,7 @@ void Simulator::ExecuteExt5(Instruction* instr) { int me = (instr->Bits(10, 6) | (instr->Bit(5) << 5)); DCHECK(sh >= 0 && sh <= 63); DCHECK(me >= 0 && me <= 63); - // rotate left - uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh)); + uintptr_t result = base::bits::RotateLeft64(rs_val, sh); uintptr_t mask = 0xffffffffffffffff << (63 - me); result &= mask; set_register(ra, result); @@ -3034,8 +3033,7 @@ void Simulator::ExecuteExt5(Instruction* instr) { int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5)); DCHECK(sh >= 0 && sh <= 63); DCHECK(mb >= 0 && mb <= 63); - // rotate left - uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh)); + uintptr_t result = base::bits::RotateLeft64(rs_val, sh); uintptr_t mask = (0xffffffffffffffff >> mb) & (0xffffffffffffffff << sh); result &= mask; set_register(ra, result); @@ -3052,8 +3050,7 @@ void Simulator::ExecuteExt5(Instruction* instr) { int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5)); int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5)); int me = 63 - sh; - // rotate left - uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh)); + uintptr_t result = base::bits::RotateLeft64(rs_val, sh); uintptr_t mask = 0; if (mb < me + 1) { uintptr_t bit = 0x8000000000000000 >> mb; @@ -3092,8 +3089,7 @@ void Simulator::ExecuteExt5(Instruction* instr) { int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5)); DCHECK(sh >= 0 && sh <= 63); DCHECK(mb >= 0 && mb <= 63); - // rotate left - uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh)); + uintptr_t result = base::bits::RotateLeft64(rs_val, sh); uintptr_t mask = 0xffffffffffffffff >> mb; result &= mask; set_register(ra, result); @@ -3268,8 +3264,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) { int sh = instr->Bits(15, 11); int mb = instr->Bits(10, 6); int me = instr->Bits(5, 1); - // rotate left - uint32_t result = (rs_val << sh) | (rs_val >> (32 - sh)); + uint32_t result = base::bits::RotateLeft32(rs_val, sh); int mask = 0; if (mb < me + 1) { int bit = 0x80000000 >> mb; @@ -3311,8 +3306,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) { } int mb = instr->Bits(10, 6); int me = instr->Bits(5, 1); - // rotate left - uint32_t result = (rs_val << sh) | (rs_val >> (32 - sh)); + uint32_t result = base::bits::RotateLeft32(rs_val, sh); int mask = 0; if (mb < me + 1) { int bit = 0x80000000 >> mb;
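A note on the simulator hunks above: the patch repeatedly replaces the open-coded rotate (rs_val << sh) | (rs_val >> (64 - sh)) with base::bits::RotateLeft64 (and RotateLeft32 for the 32-bit rlwinm/rlwimi forms), adding the matching #include "src/base/bits.h". Besides being shorter, a masked-shift helper of this shape stays well-defined when sh is 0, where the open-coded form would shift right by the full operand width, which is undefined behaviour in C++ (the surrounding DCHECKs allow sh == 0). The following is an illustrative sketch only, not V8's actual implementation; the real helpers live in src/base/bits.h and their exact signatures are assumed here:

    #include <cstdint>

    // Branchless left-rotate (illustrative sketch). Masking the shift amounts
    // keeps both shifts strictly below the operand width, so sh == 0 is
    // well-defined, whereas (x << sh) | (x >> (64 - sh)) shifts by 64 when
    // sh == 0.
    inline uint64_t RotateLeft64(uint64_t x, unsigned sh) {
      return (x << (sh & 63)) | (x >> (-sh & 63));
    }

    inline uint32_t RotateLeft32(uint32_t x, unsigned sh) {
      return (x << (sh & 31)) | (x >> (-sh & 31));
    }

For example, RotateLeft64(1, 0) == 1 and RotateLeft64(1, 63) == 0x8000000000000000, matching the rotate semantics the rld*/rlw* emulation code then masks with mb/me.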