path: root/deps/v8/src/mips
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc               44
-rw-r--r--  deps/v8/src/mips/assembler-mips.h                33
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc             491
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h               19
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc                373
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc    8
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc        675
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h         146
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc                8
9 files changed, 346 insertions, 1451 deletions
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 865e64c87d..784185ac0d 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -204,13 +204,18 @@ uint32_t RelocInfo::wasm_memory_size_reference() {
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -895,8 +900,7 @@ void Assembler::print(Label* L) {
} else {
PrintF("%d\n", instr);
}
- next(&l, internal_reference_positions_.find(l.pos()) !=
- internal_reference_positions_.end());
+ next(&l, is_internal_reference(&l));
}
} else {
PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -910,14 +914,15 @@ void Assembler::bind_to(Label* L, int pos) {
bool is_internal = false;
if (L->is_linked() && !trampoline_emitted_) {
unbound_labels_count_--;
- next_buffer_check_ += kTrampolineSlotsSize;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
}
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
int32_t dist = pos - fixup_pos;
- is_internal = internal_reference_positions_.find(fixup_pos) !=
- internal_reference_positions_.end();
+ is_internal = is_internal_reference(L);
next(L, is_internal); // Call next before overwriting link with target at
// fixup_pos.
Instr instr = instr_at(fixup_pos);
@@ -934,7 +939,6 @@ void Assembler::bind_to(Label* L, int pos) {
CHECK((trampoline_pos - fixup_pos) <= branch_offset);
target_at_put(fixup_pos, trampoline_pos, false);
fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
}
target_at_put(fixup_pos, pos, false);
} else {
@@ -1779,9 +1783,18 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
DCHECK(!src.rm().is(at));
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- addu(at, at, src.rm()); // Add base register.
+ if (IsMipsArchVariant(kMips32r6)) {
+ int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+ if (src.offset_ & kNegOffset) {
+ hi += 1;
+ }
+ aui(at, src.rm(), hi);
+ addiu(at, at, src.offset_ & kImm16Mask);
+ } else {
+ lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ addu(at, at, src.rm()); // Add base register.
+ }
}
// Helper for base-reg + upper part of offset, when offset is larger than int16.
@@ -1797,8 +1810,13 @@ int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
if (src.offset_ & kNegOffset) {
hi += 1;
}
- lui(at, hi);
- addu(at, at, src.rm());
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ aui(at, src.rm(), hi);
+ } else {
+ lui(at, hi);
+ addu(at, at, src.rm());
+ }
return (src.offset_ & kImm16Mask);
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 1df6e3f5ad..dec4c18889 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -68,7 +68,7 @@ namespace internal {
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
- V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+ V(f16) V(f18) V(f20) V(f22) V(f24)
// clang-format on
// CPU Registers.
@@ -282,8 +282,7 @@ const DoubleRegister f31 = {31};
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
-// We use the last non-callee saved odd register for O32 ABI
-#define kDoubleCompareReg f19
+#define kDoubleCompareReg f26
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -552,6 +551,17 @@ class Assembler : public AssemblerBase {
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
+ // Max offset for instructions with 16-bit offset field
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+ // Max offset for compact branch instructions with 26-bit offset field
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
+
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kTrampolineSlotsSize = 2 * kInstrSize;
+#else
+ static const int kTrampolineSlotsSize = 4 * kInstrSize;
+#endif
// ---------------------------------------------------------------------------
// Code generation.
@@ -1029,9 +1039,6 @@ class Assembler : public AssemblerBase {
// Debugging.
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1169,6 +1176,9 @@ class Assembler : public AssemblerBase {
}
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+ static bool IsCompactBranchSupported() {
+ return IsMipsArchVariant(kMips32r6);
+ }
inline int UnboundLabelsCount() { return unbound_labels_count_; }
@@ -1443,18 +1453,15 @@ class Assembler : public AssemblerBase {
// branch instruction generation, where we use jump instructions rather
// than regular branch instructions.
bool trampoline_emitted_;
-#ifdef _MIPS_ARCH_MIPS32R6
- static const int kTrampolineSlotsSize = 2 * kInstrSize;
-#else
- static const int kTrampolineSlotsSize = 4 * kInstrSize;
-#endif
- static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
// Internal reference positions, required for unbounded internal reference
// labels.
std::set<int> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
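The branch-range constants moved into the public section above follow from the encoding: branch offsets count 4-byte instruction words, so a 16-bit field reaches about 2^17 bytes and a 26-bit compact field about 2^27 bytes. A sketch (assumption, not V8 code) of that arithmetic:

    // Offset fields count instruction words (4 bytes each), i.e. byte
    // distances shifted right by 2 before encoding.
    static_assert((1 << (18 - 1)) - 1 == (1 << 15) * 4 - 1,
                  "kMaxBranchOffset: 16-bit word offset scaled to bytes");
    static_assert((1 << (28 - 1)) - 1 == (1 << 25) * 4 - 1,
                  "kMaxCompactBranchOffset: 26-bit word offset scaled to bytes");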
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 966214be8c..f75c02f677 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -684,8 +673,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -916,7 +908,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2218,51 +2209,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ And(scratch, dest, Operand(kPointerAlignmentMask));
- __ Check(eq,
- kDestinationOfCopyNotAligned,
- scratch,
- Operand(zero_reg));
- }
-
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
-
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ Addu(count, count, count);
- }
-
- Register limit = count; // Read until dest equals this.
- __ Addu(limit, dest, Operand(count));
-
- Label loop_entry, loop;
- // Copy bytes from src to dest until dest hits limit.
- __ Branch(&loop_entry);
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ Addu(src, src, Operand(1));
- __ sb(scratch, MemOperand(dest));
- __ Addu(dest, dest, Operand(1));
- __ bind(&loop_entry);
- __ Branch(&loop, lt, dest, Operand(limit));
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2889,85 +2835,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(miss, ne, at, Operand(zero_reg));
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
- __ Subu(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Addu(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ srl(scratch2, scratch2, Name::kHashShift);
- __ And(scratch2, scratch1, scratch2);
-
- // Scale the index by multiplying by the element size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
-
- __ Lsa(scratch2, scratch2, scratch2, 1);
-
- // Check if the key is identical to the name.
- __ Lsa(scratch2, elements, scratch2, 2);
- __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
- __ Branch(done, eq, name, Operand(at));
- }
-
- const int spill_mask =
- (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
- a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ MultiPush(spill_mask);
- if (name.is(a0)) {
- DCHECK(!elements.is(a1));
- __ Move(a1, name);
- __ Move(a0, elements);
- } else {
- __ Move(a0, elements);
- __ Move(a1, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(scratch2, a2);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, ne, at, Operand(zero_reg));
- __ Branch(miss, eq, at, Operand(zero_reg));
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3266,233 +3133,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ lw(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
- // found, now call handler.
- Register handler = feedback;
- __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ lw(cached_map, MemOperand(pointer_reg));
- __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
- __ lw(handler, MemOperand(pointer_reg, kPointerSize));
- __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&prepare_next);
- __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
- __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ Branch(try_array, ne, cached_map, Operand(receiver_map));
- Register handler = feedback;
-
- __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ lw(cached_map, MemOperand(pointer_reg));
- __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
- // Is it a transitioning store?
- __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&transition_call, ne, too_far, Operand(at));
- __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&transition_call);
- __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(feedback, too_far);
-
- __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&prepare_next);
- __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = StoreWithVectorDescriptor::NameRegister(); // a2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // t0
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
- Register feedback = t1;
- Register receiver_map = t2;
- Register scratch1 = t5;
-
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, scratch1, Operand(at));
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- Register scratch2 = t4;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&try_poly_name, ne, feedback, Operand(at));
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Branch(&miss, ne, key, Operand(feedback));
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ Branch(USE_DELAY_SLOT, &compare_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3847,127 +3487,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
- __ AssertReceiver(a3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ GetObjectType(a3, a2, a2);
- __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it's in fact a map.
- __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &new_object);
- __ GetObjectType(a2, a0, a0);
- __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&new_object, ne, a0, Operand(a1));
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ sw(a2, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Addu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- v0 : result (tagged)
- // -- a1 : result fields (untagged)
- // -- t1 : result end (untagged)
- // -- a2 : initial map
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
- __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(a1, t1, a0);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ sll(t0, t0, kPointerSizeLog2);
- __ subu(t0, t1, t0);
- __ InitializeFieldsWithFiller(a1, t0, a0);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(a1, t1, a0);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(&finalize, eq, a3, Operand(zero_reg));
- __ Ret();
-
- // Finalize the instance size.
- __ bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(v0, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(v0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
- __ Push(a2, t0);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a2);
- }
- __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Lsa(t1, v0, t1, kPointerSizeLog2);
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Subu(t1, t1, Operand(kHeapObjectTag));
- __ jmp(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(a1, a3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a1 : function
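For reference, the feedback layout walked by the removed HandleArrayCases (see the ASCII diagram in the deleted code) is a FixedArray of (weak map, handler) pairs scanned linearly. A hedged sketch of the equivalent scan; the types and names here are hypothetical, not V8's:

    // Illustrative only: linear scan over map/handler pairs, as the
    // deleted stub code did with pointer_reg/too_far.
    struct FeedbackEntry {
      const void* weak_map;  // weak cell holding a Map
      const void* handler;   // handler code entry
    };

    const void* FindHandler(const FeedbackEntry* entries, int count,
                            const void* receiver_map) {
      for (int i = 0; i < count; i++) {
        if (entries[i].weak_map == receiver_map) return entries[i].handler;
      }
      return nullptr;  // miss: fall through to the IC miss path
    }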
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 751095d8d8..e2dd4a9b28 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -16,17 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in v0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -311,14 +300,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 8aaeaca367..a57299abf6 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -605,351 +605,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = t0;
- DCHECK(!AreAliased(receiver, key, value, target_map,
- scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch_elements, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register ra contains the return address.
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- Register elements = t0;
- Register length = t1;
- Register array = t2;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = t5;
- Register scratch3 = t3;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, length, array, scratch2));
-
- Register scratch = t6;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(elements));
-
- __ push(ra);
- __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ sll(scratch, length, 2);
- __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- // array: destination FixedDoubleArray, tagged as heap object
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- // Update receiver's map.
- __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));
-
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag));
- __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- scratch1,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
-
- // Prepare for conversion loop.
- __ Addu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch3, array,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(array_end, scratch3, length, 2);
-
- // Repurpose registers no longer in use.
- Register hole_lower = elements;
- Register hole_upper = length;
- __ li(hole_lower, Operand(kHoleNanLower32));
- __ li(hole_upper, Operand(kHoleNanUpper32));
-
- // scratch1: begin of source FixedArray element fields, not tagged
- // hole_lower: kHoleNanLower32
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch3: begin of FixedDoubleArray element fields, not tagged
-
- __ Branch(&entry);
-
- __ bind(&only_change_map);
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ lw(ra, MemOperand(sp, 0));
- __ Branch(USE_DELAY_SLOT, fail);
- __ addiu(sp, sp, kPointerSize); // In delay slot.
-
- // Convert and copy elements.
- __ bind(&loop);
- __ lw(scratch2, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kIntSize);
- // scratch2: current element
- __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);
-
- // Normal smi, convert to double and store.
- __ mtc1(scratch2, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(scratch3));
- __ Branch(USE_DELAY_SLOT, &entry);
- __ addiu(scratch3, scratch3, kDoubleSize); // In delay slot.
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ SmiTag(scratch2);
- __ Or(scratch2, scratch2, Operand(1));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
- }
- // mantissa
- __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
- // exponent
- __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
- __ addiu(scratch3, scratch3, kDoubleSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, scratch3, Operand(array_end));
-
- __ bind(&done);
- __ pop(ra);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register ra contains the return address.
- Label entry, loop, convert_hole, gc_required, only_change_map;
- Register elements = t0;
- Register array = t2;
- Register length = t1;
- Register scratch = t5;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, array, length, scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(elements));
-
- __ MultiPush(
- value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
- __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ sll(array_size, length, 1);
- __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ Addu(src_elements, src_elements, Operand(
- FixedDoubleArray::kHeaderSize - kHeapObjectTag
- + Register::kExponentOffset));
- __ Addu(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(dst_end, dst_elements, dst_end, 1);
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialization_loop_entry);
- __ bind(&initialization_loop);
- __ sw(scratch, MemOperand(dst_elements));
- __ Addu(dst_elements, dst_elements, Operand(kPointerSize));
- __ bind(&initialization_loop_entry);
- __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
-
- __ Addu(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields, not tagged,
- // points to the exponent
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // heap_number_map: heap number map
- __ Branch(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ MultiPop(
- value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
- __ Branch(fail);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ lw(upper_bits, MemOperand(src_elements));
- __ Addu(src_elements, src_elements, kDoubleSize);
- // upper_bits: current element's upper 32 bit
- // src_elements: address of next element's upper 32 bit
- __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- Register scratch3 = t6;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
- &gc_required);
- // heap_number: new heap number
- // Load mantissa of current element, src_elements
- // point to exponent of next element.
- __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
- - Register::kExponentOffset - kDoubleSize)));
- __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
- __ mov(scratch2, dst_elements);
- __ sw(heap_number, MemOperand(dst_elements));
- __ Addu(dst_elements, dst_elements, kIntSize);
- __ RecordWrite(array,
- scratch2,
- heap_number,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ sw(scratch2, MemOperand(dst_elements));
- __ Addu(dst_elements, dst_elements, kIntSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, dst_elements, Operand(dst_end));
-
- __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- array,
- scratch,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(ra);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@@ -1076,37 +731,29 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Assembler::target_address_at(
- sequence + Assembler::kInstrSize);
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ Address target_address =
+ Assembler::target_address_at(sequence + Assembler::kInstrSize);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
patcher.masm()->li(
t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 486ae68324..c6233c5993 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -64,13 +64,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3};
+ Register registers[] = {a1, a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c3abe4fa6f..25413f9a54 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1998,6 +1998,49 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
}
}
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ madd_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ add_s(fd, fr, scratch);
+ }
+}
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ madd_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ add_d(fd, fr, scratch);
+ }
+}
+
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ msub_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ sub_s(fd, scratch, fr);
+ }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ msub_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ sub_d(fd, scratch, fr);
+ }
+}
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,
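The operand convention of the new helpers, written out as a sketch (not V8 code): Madd computes fr + fs * ft and Msub computes fs * ft - fr, which is exactly what the mul-then-add/sub fallback above emits on variants without the fused instructions.

    // Scalar equivalents of the fallback sequences above.
    float MaddFallbackS(float fr, float fs, float ft) { return fr + fs * ft; }
    float MsubFallbackS(float fr, float fs, float ft) { return fs * ft - fr; }
    double MaddFallbackD(double fr, double fs, double ft) { return fr + fs * ft; }
    double MsubFallbackD(double fr, double fs, double ft) { return fs * ft - fr; }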
@@ -2325,186 +2368,6 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
}
}
-#define __ masm->
-
-static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveHigh(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x80000000));
- __ Move_d(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_d(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveLow(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x80000000));
- __ Move_s(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_s(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-#undef __
-
-void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- min_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, gt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- max_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, lt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- min_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, gt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- max_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, lt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
void MacroAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
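The removed ZeroHelper_d/_s (and the new Float32/Float64 Min/Max added further down) special-case equal operands by inspecting raw bits, because -0.0 == +0.0 under FP comparison while min/max must order them. A minimal sketch of the distinction:

    #include <cstdint>
    #include <cstring>

    // min/max must order -0.0 below +0.0, so the sign bit is read directly.
    bool IsNegativeZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      return bits == 0x8000000000000000ULL;  // sign bit set, all else zero
    }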
@@ -4334,110 +4197,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
Addu(result, result, Operand(kHeapObjectTag));
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- sll(scratch1, length, 1); // Length in bytes, not chars.
- addiu(scratch1, scratch1,
- kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string
- // while observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4518,77 +4277,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
- scratch3));
- Label smi_value, done;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, turn potential sNaN into qNan.
- DoubleRegister double_result = f0;
- DoubleRegister double_scratch = f2;
-
- ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
- FPUCanonicalizeNaN(double_result, double_result);
-
- bind(&smi_value);
- Register untagged_value = scratch2;
- SmiUntag(untagged_value, value_reg);
- mtc1(untagged_value, double_scratch);
- cvt_d_w(double_result, double_scratch);
-
- bind(&done);
- Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- // scratch1 is now effective address of the double element
- sdc1(double_result, MemOperand(scratch1, 0));
-}
-
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
Handle<Map> map,
@@ -4870,17 +4558,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- li(t0, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, lt, t0, Operand(StepIn));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4897,7 +4583,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -4911,7 +4597,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
@@ -4925,8 +4611,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(a1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -5603,27 +5289,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- lw(scratch, NativeContextMemOperand());
- lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- Branch(no_map_match, ne, map_in_out, Operand(at));
-
- // Use the transitioned cached map.
- lw(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
lw(dst, NativeContextMemOperand());
lw(dst, ContextMemOperand(dst, index));
@@ -5661,7 +5326,7 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it;
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
li(t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
CONSTANT_SIZE);
@@ -5955,14 +5620,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
SmiUntag(dst, src);
}
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
- Register src,
- Label* non_smi_case) {
- JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
void MacroAssembler::JumpIfSmi(Register value,
Label* smi_label,
Register scratch,
@@ -6157,6 +5814,179 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
scratch2, failure);
}
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of the operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_right, nullptr, lt, src1, src2);
+ BranchF32(&return_left, nullptr, lt, src2, src1);
+
+ // Operands are equal, but check for +/-0.
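+ // max(+0, -0) is +0, so return the left operand only when its bit
+ // pattern is all zeroes (+0.0).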
+ mfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
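+ // Reached only via the NaN check in Float32Max: adding the operands
+ // quietens and propagates the NaN. The Min and Float64 variants below
+ // rely on the same trick.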
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of the operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_left, nullptr, lt, src1, src2);
+ BranchF32(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
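+ // min(+0, -0) is -0, so when the left operand's bit pattern is all
+ // zeroes (+0.0) the result must be the right operand.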
+ mfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of the operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_right, nullptr, lt, src1, src2);
+ BranchF64(&return_left, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
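+ // A double's sign bit lives in the high word, so inspecting the result
+ // of Mfhc1 is enough to tell -0.0 from +0.0 here.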
+ Mfhc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MaxOutOfLine(DoubleRegister dst,
+ DoubleRegister src1,
+ DoubleRegister src2) {
+ add_d(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of the operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_left, nullptr, lt, src1, src2);
+ BranchF64(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ Mfhc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MinOutOfLine(DoubleRegister dst,
+ DoubleRegister src1,
+ DoubleRegister src2) {
+ add_d(dst, src1, src2);
+}
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
@@ -6172,19 +6002,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatOneByteStringMask));
- Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
-}
-
-
static const int kRegisterPassedArguments = 4;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -6622,40 +6439,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again, end;
-
- // Scratch contained elements pointer.
- Move(current, object);
- lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&end, eq, current, Operand(factory->null_value()));
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
- lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
- lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&loop_again, ne, current, Operand(factory->null_value()));
-
- bind(&end);
-}
-
-
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
Register reg9, Register reg10) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 824a3bf14d..66ac930ad2 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -208,6 +208,12 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+// Number of instructions emitted before the inline jump table when computing
+// a switch table entry address (see GenerateSwitchTable below).
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kSwitchTablePrologueSize = 5;
+#else
+ static const int kSwitchTablePrologueSize = 10;
+#endif
// GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
// functor/function with 'Label *func(size_t index)' declaration.
template <typename Func>
@@ -305,17 +311,6 @@ class MacroAssembler: public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
- // Min, Max macros.
- // On pre-r6 these functions may modify at and t8 registers.
- void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
-
void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
@@ -560,32 +555,6 @@ class MacroAssembler: public Assembler {
void FastAllocate(Register object_size, Register result, Register result_new,
Register scratch, AllocationFlags flags);
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -892,6 +861,15 @@ class MacroAssembler: public Assembler {
// general-purpose register.
void Mfhc1(Register rt, FPURegister fs);
+ void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+
// Wrapper functions for the different cmp/branch types.
inline void BranchF32(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
@@ -1037,17 +1015,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@@ -1080,9 +1047,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -1158,30 +1126,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
// "branch_to" if the result of the comparison is "cond". If multiple map
@@ -1331,6 +1275,31 @@ class MacroAssembler: public Assembler {
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
+ // NaN inputs are expected to be rare; they branch to the caller-provided
+ // out-of-line code, while +/-0.0 is resolved inline on all architecture
+ // variants.
+ //
+ // If src1 and src2 are the same register, the result is simply moved to
+ // dst; otherwise the result may alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+ Label* out_of_line);
+ void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2);
+ void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2);
+
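+ // A typical pairing (illustrative sketch only; the register choices are
+ // hypothetical, real callers live in the code generators):
+ //
+ //   Label ool, done;
+ //   Float64Max(f0, f2, f4, &ool);  // Inline fast path; branches on NaN.
+ //   Branch(&done);
+ //   bind(&ool);
+ //   Float64MaxOutOfLine(f0, f2, f4);  // Rare NaN case.
+ //   bind(&done);
+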
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1557,10 +1526,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
// Jump if the register contains a smi.
void JumpIfSmi(Register value,
Label* smi_label,
@@ -1630,11 +1595,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
@@ -1731,20 +1691,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- Branch(memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
private:
@@ -1871,13 +1817,13 @@ template <typename Func>
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
if (kArchVariant >= kMips32r6) {
- BlockTrampolinePoolFor(case_count + 5);
+ BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
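+ // addiupc shifts its immediate left by 2, so at = pc + 5 * kInstrSize:
+ // the start of the inline jump table that follows this five-instruction
+ // prologue (addiupc, lsa, lw, jr, nop).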
addiupc(at, 5);
Lsa(at, at, index, kPointerSizeLog2);
lw(at, MemOperand(at));
} else {
Label here;
- BlockTrampolinePoolFor(case_count + 10);
+ BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
push(ra);
bal(&here);
sll(at, index, kPointerSizeLog2); // Branch delay slot.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index bd423996d8..7ff3d144e7 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -2537,11 +2537,11 @@ void Simulator::DecodeTypeRegisterDRsType() {
break;
case MADDF_D:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), fd + (fs * ft));
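+ // MADDF.D/MSUBF.D are fused multiply-adds with a single rounding, which
+ // std::fma models exactly; note fma(-fs, ft, fd) == fd - fs * ft, fused.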
+ set_fpu_register_double(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_D:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), fd - (fs * ft));
+ set_fpu_register_double(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_D:
set_fpu_register_double(
@@ -2964,11 +2964,11 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
case MADDF_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), fd + (fs * ft));
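+ // As with MADDF.D/MSUBF.D above: fused single-precision multiply-add
+ // with one rounding step.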
+ set_fpu_register_float(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), fd - (fs * ft));
+ set_fpu_register_float(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_S:
set_fpu_register_float(