author    Myles Borins <mylesborins@google.com>  2017-08-01 11:36:44 -0500
committer Myles Borins <mylesborins@google.com>  2017-08-01 15:23:15 -0500
commit    0a66b223e149a841669bfad5598e4254589730cb (patch)
tree      5ec050f7f78aafbf5b1e0e50d639fb843141e162 /deps/v8/src/mips
parent    1782b3836ba58ef0da6b687f2bb970c0bd8199ad (diff)
download  node-new-0a66b223e149a841669bfad5598e4254589730cb.tar.gz
deps: update V8 to 6.0.286.52
PR-URL: https://github.com/nodejs/node/pull/14004
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/OWNERS                        |   7
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h          |  29
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc             |  28
-rw-r--r--  deps/v8/src/mips/assembler-mips.h              |  27
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc            |  94
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc           |  75
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc |  18
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc       | 243
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h        |  14
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc             |  24
-rw-r--r--  deps/v8/src/mips/simulator-mips.h              |  18
11 files changed, 341 insertions(+), 236 deletions(-)
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 215c0efd88..9233913528 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -121,8 +121,17 @@ Address RelocInfo::target_address_address() {
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
- return reinterpret_cast<Address>(
- pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+ if (IsMipsArchVariant(kMips32r6)) {
+ // On R6 we don't move to the end of the instructions to be patched, but one
+ // instruction before, because if these instructions are at the end of the
+ // code object it can cause errors in the deserializer.
+ return reinterpret_cast<Address>(
+ pc_ +
+ (Assembler::kInstructionsFor32BitConstant - 1) * Assembler::kInstrSize);
+ } else {
+ return reinterpret_cast<Address>(
+ pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+ }
}
@@ -357,23 +366,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
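
For orientation: assuming kInstructionsFor32BitConstant == 2 and kInstrSize == 4 on this port (the constants live elsewhere in assembler-mips.h), the two branches of the first hunk work out to:

    // pre-r6: pc_ + 2 * 4 == pc_ + 8, just past the lui/ori pair
    // r6:     pc_ + 1 * 4 == pc_ + 4, one instruction earlier, so that a
    //         constant at the very end of a code object still leaves the
    //         deserializer an address inside the object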
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index ed3f50a817..3a37c16e5a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -2077,6 +2077,7 @@ void Assembler::lui(Register rd, int32_t j) {
void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses the same opcode as 'lui'. The difference in encoding
// is that 'lui' has the zero register in the rs field.
+ DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
@@ -3548,13 +3549,20 @@ void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // The new buffer.
+ CodeDesc desc; // the new buffer
if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2*buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Some internal data structures would overflow for very large buffers;
+ // kMaximalBufferSize must stay small enough to prevent that.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -3746,11 +3754,17 @@ void Assembler::CheckTrampolinePool() {
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
- // Interpret 2 instructions generated by li: lui/ori
- if (IsLui(instr1) && IsOri(instr2)) {
- // Assemble the 32 bit value.
- return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
- GetImmediate16(instr2));
+ // Interpret 2 instructions generated by li (lui/ori) or optimized pairs
+ // lui/jic, aui/jic or lui/jialc.
+ if (IsLui(instr1)) {
+ if (IsOri(instr2)) {
+ // Assemble the 32 bit value.
+ return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
+ GetImmediate16(instr2));
+ } else if (IsJicOrJialc(instr2)) {
+ // Assemble the 32 bit value.
+ return reinterpret_cast<Address>(CreateTargetAddress(instr1, instr2));
+ }
}
// We should never get here, force a bad address if we do.
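
A minimal sketch of what target_address_at is decoding above (an illustrative helper, not part of the V8 sources): li materializes a 32-bit constant as two 16-bit halves, with lui supplying the upper half and ori the lower.

    // Reassemble a 32-bit address from a lui/ori pair, assuming kLuiShift == 16.
    uint32_t AssembleLuiOri(uint16_t lui_imm, uint16_t ori_imm) {
      return (static_cast<uint32_t>(lui_imm) << 16) | ori_imm;
    }

The lui/jic and lui/jialc pairs go through the separate CreateTargetAddress step because jic/jialc sign-extend their 16-bit immediate (see the macro-assembler changes further down).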
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 61043eff64..7df318b9ab 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -356,6 +356,9 @@ constexpr DoubleRegister kLithiumScratchDouble = f30;
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips32r6 for compare operations.
constexpr DoubleRegister kDoubleCompareReg = f26;
+// MSA zero and scratch regs must have the same numbers as the FPU zero and
+// scratch regs.
+constexpr Simd128Register kSimd128RegZero = w28;
+constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -436,6 +439,8 @@ class Operand BASE_EMBEDDED {
Register rm() const { return rm_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
Register rm_;
int32_t imm32_; // Valid if rm_ == no_reg.
@@ -591,10 +596,19 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
Address target) {
- set_target_address_at(
- isolate,
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
- target);
+ if (IsMipsArchVariant(kMips32r6)) {
+ // On R6 the address location is shifted back by one instruction.
+ set_target_address_at(
+ isolate,
+ instruction_payload -
+ (kInstructionsFor32BitConstant - 1) * kInstrSize,
+ code, target);
+ } else {
+ set_target_address_at(
+ isolate,
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code, target);
+ }
}
// This sets the internal reference at the pc.
@@ -625,7 +639,7 @@ class Assembler : public AssemblerBase {
// Distance between the instruction referring to the address of the call
// target and the return address.
#ifdef _MIPS_ARCH_MIPS32R6
- static constexpr int kCallTargetAddressOffset = 3 * kInstrSize;
+ static constexpr int kCallTargetAddressOffset = 2 * kInstrSize;
#else
static constexpr int kCallTargetAddressOffset = 4 * kInstrSize;
#endif
@@ -1913,6 +1927,9 @@ class Assembler : public AssemblerBase {
inline void CheckBuffer();
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
inline static void set_target_internal_reference_encoded_at(Address pc,
Address target);
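
Combined with the GrowBuffer hunk in assembler-mips.cc above, the growth policy reads: double while the buffer is under 1 MB, then grow in 1 MB steps, and fail fast rather than exceed the 512 MB cap. A sketch under those assumptions (FatalProcessOutOfMemory stands in for the V8 call):

    int NextBufferSize(int current_size) {
      int next = current_size < 1 * MB ? 2 * current_size
                                       : current_size + 1 * MB;
      if (next > kMaximalBufferSize) FatalProcessOutOfMemory("GrowBuffer");
      return next;
    }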
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 2f5fa2cec1..0fcdafca21 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -1102,9 +1102,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(a1));
__ li(a2, Operand(pending_handler_offset_address));
__ lw(a2, MemOperand(a2));
- __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Addu(t9, a1, a2);
- __ Jump(t9);
+ __ Jump(t9, Code::kHeaderSize - kHeapObjectTag);
}
@@ -1237,8 +1236,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ lw(t9, MemOperand(t0)); // Deref address.
// Call JSEntryTrampoline.
- __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- __ Call(t9);
+ __ Call(t9, Code::kHeaderSize - kHeapObjectTag);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -1271,87 +1269,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers, meaning we
- // treat the return address as argument 5. Thus every argument after that
- // needs to be shifted back by 1. Since DirectCEntryStub will handle
- // allocating space for the c argument slots, we don't need to calculate
- // that into the argument positions on the stack. This is how the stack will
- // look (sp meaning the value of sp at this moment):
- // [sp + 5] - Argument 9
- // [sp + 4] - Argument 8
- // [sp + 3] - Argument 7
- // [sp + 2] - Argument 6
- // [sp + 1] - Argument 5
- // [sp + 0] - saved ra
-
- // Argument 9: Pass current isolate address.
- // CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
- __ sw(t1, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(t1, Operand(1));
- __ sw(t1, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ li(t1, Operand(address_of_regexp_stack_memory_address));
- __ lw(t1, MemOperand(t1, 0));
- __ li(t2, Operand(address_of_regexp_stack_memory_size));
- __ lw(t2, MemOperand(t2, 0));
- __ addu(t1, t1, t2);
- __ sw(t1, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(t1, zero_reg);
- __ sw(t1, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5: static offsets vector buffer.
- __ li(
- t1,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 4, a3: End of string data
- // Argument 3, a2: Start of string data
- CHECK(a3.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(a2.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (a1): Previous index.
- CHECK(a1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (a0): Subject string.
- CHECK(a0.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ Addu(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- // Return the smi-tagged result.
- __ SmiTag(v0);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
@@ -1502,8 +1419,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
__ bind(&non_function);
__ mov(a3, a1);
@@ -3057,9 +2973,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// Push return value and default return value.
__ Push(scratch, scratch);
__ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
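
The recurring change in this file — dropping an explicit Addu/addiu of Code::kHeaderSize - kHeapObjectTag in favor of an offset-taking Jump/Call — exists so the r6 lowering can fold the offset into a compact branch. Roughly, in macro-assembler terms (a sketch of the lowering, per the Jump/Call hunks below):

    // pre-r6 lowering of Jump(t9, Code::kHeaderSize - kHeapObjectTag):
    __ Addu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
    __ jr(t9);
    __ nop();  // branch delay slot
    // r6 lowering: one compact instruction, offset in the 16-bit immediate
    __ jic(t9, Code::kHeaderSize - kHeapObjectTag);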
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index c96eb67724..0c2d2c7544 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -30,25 +30,22 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->break_(0xCC);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->break_(0xCC);
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
}
DeoptimizationInputData* deopt_data =
@@ -326,14 +323,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label table_start, done, done_special, trampoline_jump;
+ Label table_start, done, trampoline_jump;
__ bind(&table_start);
int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
(table_entry_size_ / Assembler::kInstrSize);
@@ -346,6 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
+ __ nop();
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
@@ -356,34 +354,29 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ Push(at);
} else {
// Uncommon case, the branch cannot reach.
- // Create mini trampoline and adjust id constants to get proper value at
- // the end of table.
- for (int i = kMaxEntriesBranchReach; i > 1; i--) {
- // Create a mini trampoline to reach the end of the table.
+ for (int i = 0, j = 0; i < count(); i++, j++) {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
- __ li(at, - i); // In the delay slot.
+ if (j >= kMaxEntriesBranchReach) {
+ j = 0;
+ __ li(at, i);
+ __ bind(&trampoline_jump);
+ trampoline_jump = Label();
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
+ __ nop();
+ } else {
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ __ nop();
+ }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
- // Entry with id == kMaxEntriesBranchReach - 1.
- __ bind(&trampoline_jump);
- __ BranchShort(USE_DELAY_SLOT, &done_special);
- __ li(at, -1);
-
- for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
- }
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
- __ bind(&done_special);
- __ addiu(at, at, kMaxEntriesBranchReach);
- __ bind(&done);
+ __ bind(&trampoline_jump);
__ Push(at);
}
}
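
For reference, the sizing in this hunk works out as follows (a sketch assuming kImm16Bits == 16 and kInstrSize == 4, standard for this port):

    constexpr int kInstrSize = 4;
    constexpr int kImm16Bits = 16;
    // Each entry is now branch + li + nop, hence the 2 -> 3 change above.
    constexpr int table_entry_size = 3 * kInstrSize;
    // A short branch spans a signed 16-bit word offset; a quarter of that
    // range is used as the conservative reach:
    constexpr int kMaxEntriesBranchReach =
        (1 << (kImm16Bits - 2)) / (table_entry_size / kInstrSize);  // 5461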
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 6e77ee835a..c1e8229e22 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return a0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return a1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return a2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return a3; }
-const Register RegExpExecDescriptor::CodeRegister() { return t0; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
@@ -161,8 +156,19 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
// a2: start index (to support rest parameters)
- Register registers[] = {a1, a2};
+ Register registers[] = {a1, a3, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 80d0505d70..6dd611e1f6 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -219,7 +219,7 @@ void MacroAssembler::RecordWriteField(
Addu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -287,7 +287,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(at, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, at, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -564,8 +564,13 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ if (is_int16(-rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
+ } else if (!(-rt.imm32_ & kHiMask) && !MustUseReg(rt.rmode_)) { // Use load
+ // -imm and addu for cases where loading -imm generates one instruction.
+ DCHECK(!rs.is(at));
+ li(at, -rt.imm32_);
+ addu(rd, rs, at);
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -3600,22 +3605,87 @@ bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
+void MacroAssembler::Jump(Register target, int16_t offset, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(is_int16(offset));
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ if (cond == cc_always) {
+ jic(target, offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(target, offset);
+ }
+ } else {
+ if (offset != 0) {
+ Addu(target, target, offset);
+ }
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
+ }
+}
-void MacroAssembler::Jump(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
+void MacroAssembler::Jump(Register target, Register base, int16_t offset,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
+ DCHECK(is_int16(offset));
BlockTrampolinePoolScope block_trampoline_pool(this);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
- jic(target, 0);
+ jic(base, offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(base, offset);
+ }
+ } else {
+ if (offset != 0) {
+ Addu(target, base, offset);
+ } else { // Call through target
+ if (!target.is(base)) mov(target, base);
+ }
+ if (cond == cc_always) {
+ jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
- jic(target, 0);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
+ }
+}
+
+void MacroAssembler::Jump(Register target, const Operand& offset,
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
+ !is_int16(offset.immediate())) {
+ uint32_t aui_offset, jic_offset;
+ Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
+ jic_offset);
+ RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
+ aui(target, target, aui_offset);
+ if (cond == cc_always) {
+ jic(target, jic_offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(target, jic_offset);
}
} else {
+ if (offset.immediate() != 0) {
+ Addu(target, target, offset);
+ }
if (cond == cc_always) {
jr(target);
} else {
@@ -3635,14 +3705,24 @@ void MacroAssembler::Jump(intptr_t target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label skip;
if (cond != cc_always) {
Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
}
// The first instruction of 'li' may be placed in the delay slot.
// This is not an issue, t9 is expected to be clobbered anyway.
- li(t9, Operand(target, rmode));
- Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ uint32_t lui_offset, jic_offset;
+ UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ DCHECK(MustUseReg(rmode));
+ RecordRelocInfo(rmode, target);
+ lui(t9, lui_offset);
+ Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
+ } else {
+ li(t9, Operand(target, rmode));
+ Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd);
+ }
bind(&skip);
}
@@ -3669,11 +3749,8 @@ void MacroAssembler::Jump(Handle<Code> code,
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
-
-int MacroAssembler::CallSize(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
+int MacroAssembler::CallSize(Register target, int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = 0;
@@ -3685,16 +3762,59 @@ int MacroAssembler::CallSize(Register target,
if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
+ if (!IsMipsArchVariant(kMips32r6) && offset != 0) {
+ size += 1;
+ }
+
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
+void MacroAssembler::Call(Register target, int16_t offset, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
+ DCHECK(is_int16(offset));
+#ifdef DEBUG
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ if (cond == cc_always) {
+ jialc(target, offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jialc(target, offset);
+ }
+ } else {
+ if (offset != 0) {
+ Addu(target, target, offset);
+ }
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
+ }
+
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
+}
+
+// Note: To call gcc-compiled C code on mips, you must call thru t9.
+void MacroAssembler::Call(Register target, Register base, int16_t offset,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
+ DCHECK(is_uint16(offset));
#ifdef DEBUG
int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
#endif
@@ -3704,13 +3824,18 @@ void MacroAssembler::Call(Register target,
bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
- jialc(target, 0);
+ jialc(base, offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
- jialc(target, 0);
+ jialc(base, offset);
}
} else {
+ if (offset != 0) {
+ Addu(target, base, offset);
+ } else { // Call through target
+ if (!target.is(base)) mov(target, base);
+ }
if (cond == cc_always) {
jalr(target);
} else {
@@ -3723,7 +3848,7 @@ void MacroAssembler::Call(Register target,
}
#ifdef DEBUG
- CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
@@ -3735,8 +3860,11 @@ int MacroAssembler::CallSize(Address target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- int size = CallSize(t9, cond, rs, rt, bd);
- return size + 2 * kInstrSize;
+ int size = CallSize(t9, 0, cond, rs, rt, bd);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always)
+ return size + 1 * kInstrSize;
+ else
+ return size + 2 * kInstrSize;
}
@@ -3746,12 +3874,23 @@ void MacroAssembler::Call(Address target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ CheckBuffer();
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
int32_t target_int = reinterpret_cast<int32_t>(target);
- li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
- Call(t9, cond, rs, rt, bd);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
+ uint32_t lui_offset, jialc_offset;
+ UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
+ if (MustUseReg(rmode)) {
+ RecordRelocInfo(rmode, target_int);
+ }
+ lui(t9, lui_offset);
+ Call(t9, jialc_offset, cond, rs, rt, bd);
+ } else {
+ li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
+ Call(t9, 0, cond, rs, rt, bd);
+ }
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -3796,7 +3935,7 @@ void MacroAssembler::Ret(Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- Jump(ra, cond, rs, rt, bd);
+ Jump(ra, 0, cond, rs, rt, bd);
}
@@ -3825,9 +3964,8 @@ void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
- // references
- // until associated instructions are emitted and available to be
- // patched.
+ // references until associated instructions are emitted and
+ // available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
@@ -3850,8 +3988,8 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
uint32_t imm32;
imm32 = jump_address(L);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
- uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+ uint32_t lui_offset, jialc_offset;
+ UnpackTargetAddressUnsigned(imm32, lui_offset, jialc_offset);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
@@ -3859,16 +3997,15 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, lui_offset);
- jialc(at, jic_offset);
+ jialc(at, jialc_offset);
}
CheckBuffer();
} else {
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
- // references
- // until associated instructions are emitted and available to be
- // patched.
+ // references until associated instructions are emitted and
+ // available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
@@ -6047,15 +6184,27 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- li(t8, Operand(function));
- CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+ if (IsMipsArchVariant(kMips32r6)) {
+ uint32_t lui_offset, jialc_offset;
+ UnpackTargetAddressUnsigned(Operand(function).immediate(), lui_offset,
+ jialc_offset);
+ if (MustUseReg(Operand(function).rmode())) {
+ RecordRelocInfo(Operand(function).rmode(), Operand(function).immediate());
+ }
+ lui(t9, lui_offset);
+ CallCFunctionHelper(t9, jialc_offset, num_reg_arguments,
+ num_double_arguments);
+ } else {
+ li(t9, Operand(function));
+ CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
+ }
}
void MacroAssembler::CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}
@@ -6070,10 +6219,11 @@ void MacroAssembler::CallCFunction(Register function,
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunctionHelper(Register function,
+void MacroAssembler::CallCFunctionHelper(Register function_base,
+ int16_t function_offset,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
@@ -6102,12 +6252,12 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
- if (!function.is(t9)) {
- mov(t9, function);
- function = t9;
+ if (!function_base.is(t9)) {
+ mov(t9, function_base);
+ function_base = t9;
}
- Call(function);
+ Call(function_base, function_offset);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@@ -6442,6 +6592,7 @@ CodePatcher::~CodePatcher() {
}
// Check that the code was patched as expected.
+
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
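
Several hunks above rely on UnpackTargetAddressUnsigned to split a 32-bit target between a lui/aui immediate and a jic/jialc immediate. Because jic and jialc sign-extend their 16-bit offset, the upper half must absorb a carry whenever bit 15 of the lower half is set. A sketch of that split (illustrative, not necessarily V8's exact helper):

    void UnpackTargetAddressUnsigned(uint32_t target, uint32_t& hi,
                                     uint32_t& lo) {
      lo = target & 0xFFFF;
      hi = (target >> 16) + ((lo & 0x8000) ? 1 : 0);  // sign-extension carry
      // Invariant: (hi << 16) + static_cast<int16_t>(lo) == target.
    }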
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 94802f8858..3b2539e408 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -180,12 +180,15 @@ class MacroAssembler: public Assembler {
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
- void Jump(Register target, COND_ARGS);
+ void Jump(Register target, int16_t offset = 0, COND_ARGS);
+ void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
+ void Jump(Register target, const Operand& offset, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Register target, COND_ARGS);
- void Call(Register target, COND_ARGS);
+ static int CallSize(Register target, int16_t offset = 0, COND_ARGS);
+ void Call(Register target, int16_t offset = 0, COND_ARGS);
+ void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
@@ -1664,9 +1667,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
+ void CallCFunctionHelper(Register function_base, int16_t function_offset,
+ int num_reg_arguments, int num_double_arguments);
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 94c3112ff4..38816e9e0d 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1016,6 +1016,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -2028,12 +2030,11 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7,
+ int32_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
int32_t arg2, int32_t arg3,
@@ -2076,6 +2077,10 @@ void Simulator::SoftwareInterrupt() {
// Args 4 and 5 are on the stack after the reserved space for args 0..3.
int32_t arg4 = stack_pointer[4];
int32_t arg5 = stack_pointer[5];
+ int32_t arg6 = stack_pointer[6];
+ int32_t arg7 = stack_pointer[7];
+ int32_t arg8 = stack_pointer[8];
+ STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2282,11 +2287,12 @@ void Simulator::SoftwareInterrupt() {
if (::v8::internal::FLAG_trace_sim) {
PrintF(
"Call to host function at %p "
- "args %08x, %08x, %08x, %08x, %08x, %08x\n",
+ "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x\n",
static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
- arg4, arg5);
+ arg4, arg5, arg6, arg7, arg8);
}
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ int64_t result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
set_register(v0, static_cast<int32_t>(result));
set_register(v1, static_cast<int32_t>(result >> 32));
}
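
The jump from six to nine simulated C arguments follows the MIPS o32 calling convention: the first four integer arguments travel in a0-a3, and everything after them lives on the stack, which is why arg4..arg8 come from stack_pointer[4..8] above. A sketch of the readout (the register-enum arithmetic is illustrative):

    // o32: args 0-3 in registers, args 4+ on the stack above the
    // 16-byte home area reserved for a0-a3.
    int32_t args[9];
    for (int i = 0; i < 4; i++) args[i] = get_register(a0 + i);
    for (int i = 4; i < 9; i++) args[i] = stack_pointer[i];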
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 2785f913c9..1ed96bd003 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -26,18 +26,15 @@ namespace internal {
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
-typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
-
+typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+ int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
- p7, p8))
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -466,7 +463,7 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
@@ -530,9 +527,8 @@ class Simulator {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- Simulator::current(isolate) \
- ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-
+ Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
+ p8)
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of