author      Ali Ijaz Sheikh <ofrobots@google.com>  2016-04-07 14:06:55 -0700
committer   Ali Ijaz Sheikh <ofrobots@google.com>  2016-04-14 10:03:39 -0700
commit      52af5c4eebf4de8638aef0338bd826656312a02a (patch)
tree        628dc9fb0b558c3a73a2160706fef368876fe548 /deps/v8/src/mips64
parent      6e3e8acc7cc7ebd3d67db5ade1247b8b558efe09 (diff)
download    node-new-52af5c4eebf4de8638aef0338bd826656312a02a.tar.gz
deps: upgrade V8 to 5.0.71.32
* Pick up the branch head for V8 5.0 stable [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]

[1] https://chromium.googlesource.com/v8/v8.git/+/3c67831
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/4b09207e447ae5bd34643b4c6321bee7b76d35f9

Ref: https://github.com/nodejs/node/pull/5945
PR-URL: https://github.com/nodejs/node/pull/6111
Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com>
Diffstat (limited to 'deps/v8/src/mips64')
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h            32
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc                3
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h                 5
-rw-r--r--  deps/v8/src/mips64/builtins-mips64.cc               713
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc            1386
-rw-r--r--  deps/v8/src/mips64/codegen-mips64.cc                  6
-rw-r--r--  deps/v8/src/mips64/constants-mips64.h                25
-rw-r--r--  deps/v8/src/mips64/deoptimizer-mips64.cc             39
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc   61
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc        322
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h          74
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc              270
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.h                28
13 files changed, 1699 insertions(+), 1265 deletions(-)
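
The single most common change in the hunks below replaces the two-instruction scale-and-add sequence (a dsll of an index followed by daddu/Daddu onto a base) with the Dlsa macro. A minimal sketch of what such a macro is expected to do, inferred from the before/after pairs in this diff rather than copied from macro-assembler-mips64.cc (the r6 encoding detail in particular is an assumption):

    // Sketch only: Dlsa(rd, rt, rs, sa) computes rd = rt + (rs << sa).
    void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
      if (kArchVariant == kMips64r6 && sa >= 1 && sa <= 4) {
        // MIPS64r6 has a fused shift-and-add instruction; its two-bit shift
        // field encodes sa - 1.
        dlsa(rd, rt, rs, sa - 1);
      } else {
        // Pre-r6 fallback: the original two-instruction sequence.
        dsll(at, rs, sa);   // at = rs << sa
        Daddu(rd, rt, at);  // rd = rt + at
      }
    }

So, for example, "dsll(a0, a0, kPointerSizeLog2); Daddu(sp, a0, sp);" becomes "Dlsa(sp, sp, a0, kPointerSizeLog2);".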
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 09436ed1d4..37ee3a6807 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -213,8 +213,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -282,10 +282,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -349,28 +347,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr instr0 = Assembler::instr_at(pc_); // lui.
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); // ori.
- Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize); // dsll.
- Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize); // ori.
- Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); // jalr.
-
- bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
- (instr1 & kOpcodeMask) == ORI &&
- (instr2 & kFunctionFieldMask) == DSLL &&
- (instr3 & kOpcodeMask) == ORI &&
- (instr4 & kFunctionFieldMask) == JALR);
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 9c313a18d6..f0d3eba6b6 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -3270,10 +3270,9 @@ void Assembler::CheckTrampolinePool() {
bc(&after_pool);
} else {
b(&after_pool);
- nop();
}
+ nop();
- EmitForbiddenSlotInstruction();
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{ BlockGrowBufferScope block_buf_growth(this);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index f8d315d835..bf2285a2d5 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -306,6 +306,8 @@ struct FPUControlRegister {
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
+// TODO(mips64) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands.
@@ -1092,7 +1094,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
@@ -1272,7 +1274,6 @@ class Assembler : public AssemblerBase {
void EmitForbiddenSlotInstruction() {
if (IsPrevInstrCompactBranch()) {
nop();
- ClearCompactBranchState();
}
}
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index 3a9980beab..1d8d5d3599 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -141,6 +141,109 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in a1 and the double value in f0.
+ __ LoadRoot(a1, root_index);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ mov(a3, a0);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ Dsubu(a0, a0, Operand(1));
+ __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+
+ // Load the next parameter tagged value into a2.
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
+ __ ld(a2, MemOperand(at));
+
+ // Load the double value of the parameter into f2, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(a2, &convert_smi);
+ __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ JumpIfRoot(a4, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a3);
+ __ Push(a0, a1, a3);
+ __ mov(a0, a2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a2, v0);
+ __ Pop(a0, a1, a3);
+ {
+ // Restore the double accumulator value (f0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(a1, &restore_smi);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&done_restore);
+ __ bind(&restore_smi);
+ __ SmiToDoubleFPURegister(a1, f0, a4);
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(a3);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+ __ jmp(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDoubleFPURegister(a2, f2, a4);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (f0) and the next parameter value on the right hand side (f2).
+ Label compare_equal, compare_nan, compare_swap;
+ __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
+ __ BranchF(&compare_swap, nullptr, cc, f0, f2);
+ __ Branch(&loop);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ __ FmoveHigh(a4, reg);
+ // Make a4 unsigned.
+ __ dsll32(a4, a4, 0);
+ __ Branch(&loop, ne, a4, Operand(0x8000000000000000));
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ mov_d(f0, f2);
+ __ mov(a1, a2);
+ __ jmp(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ Dlsa(sp, sp, a3, kPointerSizeLog2);
+ __ mov(v0, a1);
+ __ DropAndRet(1);
+}
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -156,8 +259,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
}
@@ -192,8 +294,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -232,8 +333,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -257,8 +359,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
}
@@ -319,8 +420,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -361,33 +461,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
__ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
}
-
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- a1 : target function (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -----------------------------------
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push a copy of the target function and the new target.
- __ Push(a1, a3, a1);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(a1, a3);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -395,8 +477,26 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Jump(at);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3, a1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore target function and new target.
+ __ Pop(a0, a1, a3);
+ __ SmiUntag(a0);
+ }
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
__ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
@@ -412,8 +512,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ LoadRoot(a4, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(a4));
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -422,7 +521,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -444,143 +544,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(a2, a0);
if (create_implicit_receiver) {
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ GetObjectType(a3, a5, a4);
- __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it is in fact a map.
- // a3: new target
- __ ld(a2,
- FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&rt_call, ne, a1, Operand(a5));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Allocate(a4, t0, a4, t2, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t0: JSObject (not HeapObject tagged - the actual address).
- // a4: start of next object
- __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t1, t0);
- STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
- __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
- STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
- __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
- STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
- __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
- STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
- __ Daddu(t1, t1, Operand(3 * kPointerSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Daddu(t0, t0, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with appropriate filler.
- // t0: JSObject (tagged)
- // t1: First in-object property of JSObject (not tagged)
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwu(t2, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(a6, t2);
- // a6: slack tracking counter
- __ Branch(&no_inobject_slack_tracking, lt, a6,
- Operand(Map::kSlackTrackingCounterEnd));
- // Decrease generous allocation count.
- __ Dsubu(t2, t2, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(t2, bit_field3);
-
- // Allocate object with a slack.
- __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ dsubu(a0, a4, a0);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t1,
- Operand(a0));
- }
- __ InitializeFieldsWithFiller(t1, a0, t3);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(t1, a4, t3);
-
- // a6: slack tracking counter value before decreasing.
- __ Branch(&allocated, ne, a6, Operand(Map::kSlackTrackingCounterEnd));
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(a1, a3, t0, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(a1, a3, t0);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t0: JSObject
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(t1, a4, t3);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t0: JSObject
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(a1, a3, a1, a3); // constructor function, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a1, a3);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(t0, v0);
__ Pop(a1, a3);
- // Receiver for constructor call allocated.
- // a1: constructor function
- // a3: new target
- // t0: JSObject
- __ bind(&allocated);
-
+ // ----------- S t a t e -------------
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- t0: newly allocated object
+ // -----------------------------------
__ ld(a0, MemOperand(sp));
}
__ SmiUntag(a0);
@@ -610,8 +584,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(t0, a0);
__ jmp(&entry);
__ bind(&loop);
- __ dsll(a4, t0, kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
+ __ Dlsa(a4, a2, t0, kPointerSizeLog2);
__ ld(a5, MemOperand(a4));
__ push(a5);
__ bind(&entry);
@@ -677,6 +650,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi, indicating that the constructor result
+ // from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(v0, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
__ SmiScale(a4, a1, kPointerSizeLog2);
__ Daddu(sp, sp, a4);
__ Daddu(sp, sp, kPointerSize);
@@ -688,17 +674,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
@@ -778,8 +770,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// a3: argc
// s0: argv, i.e. points to first arg
Label loop, entry;
- __ dsll(a4, a3, kPointerSizeLog2);
- __ daddu(a6, s0, a4);
+ __ Dlsa(a6, s0, a3, kPointerSizeLog2);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// a6 points past last arg.
@@ -841,10 +832,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o sp: stack pointer
// o ra: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -853,16 +842,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ Push(a3);
-
- // Push zero for bytecode array offset.
- __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
__ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ DCHECK(!debug_info.is(a0));
+ __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&load_debug_bytecode_array, ne, debug_info,
+ Operand(DebugInfo::uninitialized()));
__ ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -874,6 +866,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size (word) from the BytecodeArray object.
@@ -904,44 +899,38 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Daddu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a0, MemOperand(a0));
- __ dsll(at, a0, kPointerSizeLog2);
- __ Daddu(at, kInterpreterDispatchTableRegister, at);
+ __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ ld(at, MemOperand(at));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ __ ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ Branch(&bytecode_array_loaded);
}
@@ -966,7 +955,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -991,7 +981,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
__ Branch(&loop_header, gt, a2, Operand(a3));
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1026,47 +1018,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use this for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ Daddu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ ld(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ ld(a1,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+ __ ld(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1089,14 +1058,36 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ dsll(a1, a1, kPointerSizeLog2);
- __ Daddu(a1, kInterpreterDispatchTableRegister, a1);
+ __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ ld(a1, MemOperand(a1));
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1111,22 +1102,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1346,13 +1345,12 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
- __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
- __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lwu(scratch, FieldMemOperand(map, Map::kBitField3Offset));
- __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ DecodeField<Map::HasHiddenPrototype>(scratch);
__ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+
+ __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ Branch(&prototype_loop_start);
@@ -1377,8 +1375,7 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// Do the compatible receiver check
Label receiver_check_failed;
- __ sll(at, a0, kPointerSizeLog2);
- __ Daddu(t8, sp, at);
+ __ Dlsa(t8, sp, a0, kPointerSizeLog2);
__ ld(t0, MemOperand(t8));
CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
@@ -1512,6 +1509,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ mov(a3, a2);
+ // Dlsa() cannot be used here as the scratch value is used later.
__ dsll(scratch, a0, kPointerSizeLog2);
__ Daddu(a0, sp, Operand(scratch));
__ ld(a1, MemOperand(a0)); // receiver
@@ -1582,8 +1580,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ ld(a1, MemOperand(at));
// 3. Shift arguments and return address one slot down on the stack
@@ -1594,8 +1591,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a2, sp, at);
+ __ Dlsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
__ ld(at, MemOperand(a2, -kPointerSize));
@@ -1695,6 +1691,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ mov(a2, a1);
+ // Dlsa() cannot be used here as the scratch value is used later.
__ dsll(scratch, a0, kPointerSizeLog2);
__ Daddu(a0, sp, Operand(scratch));
__ sd(a2, MemOperand(a0)); // receiver
@@ -1850,9 +1847,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ ld(a2,
- FieldMemOperand(a0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
__ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
__ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
__ Branch(&create_runtime, ne, a2, Operand(at));
@@ -1906,8 +1901,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Label done, loop;
__ bind(&loop);
__ Branch(&done, eq, a4, Operand(a2));
- __ dsll(at, a4, kPointerSizeLog2);
- __ Daddu(at, a0, at);
+ __ Dlsa(at, a0, a4, kPointerSizeLog2);
__ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Push(at);
__ Daddu(a4, a4, Operand(1));
@@ -1927,10 +1921,133 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops top JavaScript frame and an arguments adaptor frame below it (if
+// present) preserving all the arguments prepared for current call.
+// Does nothing if debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ li(at, Operand(debug_is_active));
+ __ lb(scratch1, MemOperand(at));
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&no_interpreter_frame, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::STUB)));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(fp, scratch2);
+ __ ld(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ ld(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch2;
+ __ Dlsa(dst_reg, fp, scratch1, kPointerSizeLog2);
+ __ Daddu(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ Dlsa(src_reg, sp, args_reg, kPointerSizeLog2);
+ // Count receiver argument as well (not included in args_reg).
+ __ Daddu(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ Dsubu(src_reg, src_reg, Operand(kPointerSize));
+ __ Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
+ __ ld(tmp_reg, MemOperand(src_reg));
+ __ sd(tmp_reg, MemOperand(dst_reg));
+ __ bind(&entry);
+ __ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ __ mov(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -1970,8 +2087,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(a3);
} else {
Label convert_to_object, convert_receiver;
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ ld(a3, MemOperand(at));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2007,8 +2123,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a3, MemOperand(at));
}
__ bind(&done_convert);
@@ -2020,6 +2135,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
@@ -2038,18 +2157,22 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// Patch the receiver to [[BoundThis]].
{
__ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
- __ dsll(a4, a0, kPointerSizeLog2);
- __ daddu(a4, a4, sp);
+ __ Dlsa(a4, sp, a0, kPointerSizeLog2);
__ sd(at, MemOperand(a4));
}
@@ -2090,11 +2213,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ mov(a5, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, gt, a5, Operand(a0));
- __ dsll(a6, a4, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a6));
- __ dsll(a6, a5, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a5, kPointerSizeLog2);
__ sd(at, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
@@ -2111,11 +2232,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ dsll(a5, a4, kPointerSizeLog2);
- __ daddu(a5, a5, a2);
+ __ Dlsa(a5, a2, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a5));
- __ dsll(a5, a0, kPointerSizeLog2);
- __ daddu(a5, a5, sp);
+ __ Dlsa(a5, sp, a0, kPointerSizeLog2);
__ sd(at, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2133,7 +2252,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2143,12 +2263,23 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(a1);
// Increase the arguments size to include the pushed function and the
@@ -2161,18 +2292,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
- __ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Overwrite the original receiver with the (original) target.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2253,11 +2379,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ mov(a5, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, ge, a5, Operand(a0));
- __ dsll(a6, a4, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a6));
- __ dsll(a6, a5, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a5, kPointerSizeLog2);
__ sd(at, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
@@ -2274,11 +2398,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ dsll(a5, a4, kPointerSizeLog2);
- __ daddu(a5, a5, a2);
+ __ Dlsa(a5, a2, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a5));
- __ dsll(a5, a0, kPointerSizeLog2);
- __ daddu(a5, a5, sp);
+ __ Dlsa(a5, sp, a0, kPointerSizeLog2);
__ sd(at, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2357,8 +2479,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
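
The TailCallMode parameter introduced above is threaded from the call site through Generate_Call/Generate_CallFunction down to PrepareForTailCall. A usage sketch of how a caller selects the tail-call-aware stub, built only from the accessors already used in the hunks above (the wrapper function itself is illustrative):

    // Illustrative only; mirrors the Jump in Generate_InterpreterPushArgsAndCallImpl.
    void EmitTailCallToTarget(MacroAssembler* masm) {
      Handle<Code> call = masm->isolate()->builtins()->Call(
          ConvertReceiverMode::kAny, TailCallMode::kAllow);
      // With TailCallMode::kAllow the Call builtin runs PrepareForTailCall and
      // drops the caller's frame before transferring control.
      masm->Jump(call, RelocInfo::CODE_TARGET);
    }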
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 2531d6b3f1..bde5531077 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -90,9 +90,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength);
+ Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -273,7 +272,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t1;
@@ -294,13 +293,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ Branch(&return_equal, eq, t0, Operand(HEAP_NUMBER_TYPE));
- __ And(t0, t0, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t0, Operand(zero_reg));
- }
} else {
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
@@ -310,13 +302,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ And(t0, t0, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t0, Operand(zero_reg));
- }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -510,45 +495,55 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
+ Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
__ And(at, a2, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
- __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
__ And(at, a3, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
- // Both are internalized strings. We already checked they weren't the same
- // pointer so they are not equal.
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in v0.
__ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
- __ and_(a0, a2, a3);
- __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ And(at, t0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&undetectable, ne, at, Operand(zero_reg));
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, ne, at, Operand(zero_reg));
+
+ __ GetInstanceType(a2, a2);
+ __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ GetInstanceType(a3, a3);
+ __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in v0.
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In delay slot.
+
+ __ bind(&undetectable);
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, eq, at, Operand(zero_reg));
__ Ret(USE_DELAY_SLOT);
- __ xori(v0, a0, 1 << Map::kIsUndetectable);
+ __ li(v0, Operand(EQUAL)); // In delay slot.
}
@@ -600,7 +595,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -739,8 +734,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -971,7 +965,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -985,7 +978,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(v0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
__ push(ra);
@@ -1001,7 +993,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -1073,8 +1064,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(s1, a2);
} else {
// Compute the argv pointer in a callee-saved register.
- __ dsll(s1, a0, kPointerSizeLog2);
- __ Daddu(s1, sp, s1);
+ __ Dlsa(s1, sp, a0, kPointerSizeLog2);
__ Dsubu(s1, s1, kPointerSize);
}
@@ -1090,47 +1080,77 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// a0 = argc
__ mov(s0, a0);
__ mov(s2, a1);
- // a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ int result_stack_size;
+ if (result_size() <= 2) {
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a1, s1);
+ result_stack_size = 0;
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Allocate additional space for the result.
+ result_stack_size =
+ ((result_size() * kPointerSize) + frame_alignment_mask) &
+ ~frame_alignment_mask;
+ __ Dsubu(sp, sp, Operand(result_stack_size));
+
+ // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a2, s1);
+ __ mov(a1, a0);
+ __ mov(a0, sp);
+ }
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- // This branch-and-link sequence is needed to find the current PC on mips,
- // saved to the ra register.
- // Use masm-> here instead of the double-underscore macro since extra
- // coverage code can interfere with the proper calculation of ra.
+ int kNumInstructionsToJump = 4;
Label find_ra;
- masm->bal(&find_ra); // bal exposes branch delay slot.
- masm->mov(a1, s1);
- masm->bind(&find_ra);
-
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
- const int kNumInstructionsToJump = 5;
- masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
- masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ if (kArchVariant >= kMips64r6) {
+ __ addiupc(ra, kNumInstructionsToJump + 1);
+ } else {
+ // This branch-and-link sequence is needed to find the current PC on mips
+ // before r6, saved to the ra register.
+ __ bal(&find_ra); // bal exposes branch delay slot.
+ __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+ }
+ __ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ __ sd(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
- masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
- masm->jalr(t9);
+ __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ __ jalr(t9);
// Set up sp in the delay slot.
- masm->daddiu(sp, sp, -kCArgsSlotsSize);
+ __ daddiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+ // Read result values stored on stack.
+ __ ld(a0, MemOperand(v0, 2 * kPointerSize));
+ __ ld(v1, MemOperand(v0, 1 * kPointerSize));
+ __ ld(v0, MemOperand(v0, 0 * kPointerSize));
+ }
+ // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
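
The new result_size() == 3 path follows the usual n64 convention for returning an aggregate larger than two doublewords: the caller reserves aligned stack space and passes its address as a hidden first argument (which is why a0..a3 shift to hidden result / argc / argv / isolate), then reads the three words back after the call. A self-contained sketch under that assumption; the struct and function names are illustrative, not the runtime's:

// Stand-in for the three-pointer result the stub reads back into v0, v1
// and a0 after the call.
struct Triple {
  void* first;
  void* second;
  void* third;
};

// A 24-byte struct cannot come back in the v0/v1 register pair, so under
// the n64 ABI the caller allocates storage and passes its address as a
// hidden leading argument -- the frame-aligned block the stub carves out
// of sp with Dsubu(sp, sp, result_stack_size).
Triple MakeTriple(void* a, void* b, void* c) {
  Triple t = {a, b, c};
  return t;  // compiled as stores through the hidden result pointer
}

int main() {
  int x = 0, y = 0, z = 0;
  Triple t = MakeTriple(&x, &y, &z);
  return (t.first == &x && t.third == &z) ? 0 : 1;
}
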
@@ -1246,14 +1266,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Move(kDoubleRegZero, 0.0);
// Load argv in s0 register.
- if (kMipsAbi == kN64) {
- __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register.
- } else { // Abi O32.
- // 5th parameter on stack for O32 abi.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
- __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
- }
+ __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register.
__ InitializeRootRegister();
@@ -1558,303 +1571,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(a1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor,
- eq,
- a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check index (a1) against formal parameters count limit passed in
- // through register a0. Use unsigned comparison to get negative
- // check for free.
- __ Branch(&slow, hs, a1, Operand(a0));
-
- // Read the argument from the stack and return it.
- __ dsubu(a3, a0, a1);
- __ SmiScale(a7, a3, kPointerSizeLog2);
- __ Daddu(a3, fp, Operand(a7));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, MemOperand(a3, kDisplacement));
-
- // Arguments adaptor case: Check index (a1) against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
- // Read the argument from the adaptor frame and return it.
- __ dsubu(a3, a0, a1);
- __ SmiScale(a7, a3, kPointerSizeLog2);
- __ Daddu(a3, a2, Operand(a7));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, MemOperand(a3, kDisplacement));
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(a1);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(a7, a2, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(a7));
- __ daddiu(a3, a4, StandardFrameConstants::kCallerSPOffset);
-
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // Registers used over whole function:
- // a5 : arguments count (tagged)
- // a6 : mapped parameter count (tagged)
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(a5, a2);
- __ Branch(USE_DELAY_SLOT, &try_allocate);
- __ mov(a6, a2); // In delay slot.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(t2));
- __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // a5 = argument count (tagged)
- // a6 = parameter count (tagged)
- // Compute the mapped parameter count = min(a6, a5) in a6.
- __ mov(a6, a2);
- __ Branch(&try_allocate, le, a6, Operand(a5));
- __ mov(a6, a5);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
- __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
- __ SmiScale(t1, a6, kPointerSizeLog2);
- __ daddiu(t1, t1, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(t1, t1, Operand(t2));
- __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into a4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ ld(a4, NativeContextMemOperand());
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
- __ ld(a4, MemOperand(a4, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
- __ ld(a4, MemOperand(a4, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a2 = argument count (smi-tagged)
- // a4 = address of arguments map (tagged)
- // a6 = mapped parameter count (tagged)
- __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(a1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sd(a1, FieldMemOperand(v0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(a5);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ sd(a5, FieldMemOperand(v0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, a4 will point there, otherwise
- // it will point to the backing store.
- __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a2 = argument count (tagged)
- // a4 = address of parameter map or backing store (tagged)
- // a6 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
- // Move backing store address to a1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a1, a4);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
-
- __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
- __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
- __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(a5, a4, Operand(t2));
- __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
- __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(a5, a6);
- __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Dsubu(t1, t1, Operand(a6));
- __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(a1, a4, Operand(t2));
- __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
-
- // a1 = address of backing store (tagged)
- // a4 = address of parameter map (tagged)
- // a0 = temporary scratch (a.o., for address calculation)
- // t1 = loop variable (tagged)
- // a7 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
- __ SmiScale(a0, a5, kPointerSizeLog2);
- __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Daddu(t2, a4, a0);
- __ sd(t1, MemOperand(t2));
- __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Daddu(t2, a1, a0);
- __ sd(a7, MemOperand(t2));
- __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
-
-  // Restore a5 = argument count (tagged).
- __ ld(a5, FieldMemOperand(v0, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // v0 = address of new object (tagged)
- // a1 = address of backing store (tagged)
- // a5 = argument count (tagged)
- // a6 = mapped parameter count (tagged)
- // t1 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
- __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Dsubu(a3, a3, Operand(t2));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Dsubu(a3, a3, Operand(kPointerSize));
- __ ld(a4, MemOperand(a3, 0));
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(t1, a1, Operand(t2));
- __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, a6, Operand(a5));
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // a5 = argument count (tagged)
- __ bind(&runtime);
- __ Push(a1, a3, a5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in ra.
Label slow;
@@ -1878,122 +1594,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
- __ Branch(&try_allocate, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(at, a2, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(at));
- __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(t1, a2);
- __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
-
- __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(t1, v0, a4, a5, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
-
- __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(a2);
- __ sd(a2,
- FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- Label done;
- __ Branch(&done, eq, a2, Operand(zero_reg));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
- __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ sd(a2, FieldMemOperand(a4, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up a4 to point to the first array slot.
- __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement a3 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Daddu(a3, a3, Operand(-kPointerSize));
- __ ld(a5, MemOperand(a3));
- // Post-increment a4 with kPointerSize on each iteration.
- __ sd(a5, MemOperand(a4));
- __ Daddu(a4, a4, Operand(kPointerSize));
- __ Dsubu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
-
- // Return.
- __ bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // a4 : rest parameter index (tagged)
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a5, MemOperand(a0, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a5,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ ld(a2, MemOperand(a0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(at, a2, kPointerSizeLog2);
-
- __ Daddu(a3, a0, Operand(at));
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a2, a3, a4);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2182,7 +1782,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
+ const int kParameterRegisters = 8;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -2203,58 +1803,28 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
- if (kMipsAbi == kN64) {
- // Argument 9: Pass current isolate address.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(a0, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a7, Operand(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ li(a0, Operand(address_of_regexp_stack_memory_address));
- __ ld(a0, MemOperand(a0, 0));
- __ li(a2, Operand(address_of_regexp_stack_memory_size));
- __ ld(a2, MemOperand(a2, 0));
- __ daddu(a6, a0, a2);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a5, zero_reg);
-
- // Argument 5: static offsets vector buffer.
- __ li(a4, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate())));
- } else { // O32.
- DCHECK(kMipsAbi == kO32);
-
- // Argument 9: Pass current isolate address.
- // CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(a0, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a0, Operand(1));
- __ sd(a0, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ li(a0, Operand(address_of_regexp_stack_memory_address));
- __ ld(a0, MemOperand(a0, 0));
- __ li(a2, Operand(address_of_regexp_stack_memory_size));
- __ ld(a2, MemOperand(a2, 0));
- __ daddu(a0, a0, a2);
- __ sd(a0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a0, zero_reg);
- __ sd(a0, MemOperand(sp, 2 * kPointerSize));
+ // Argument 9: Pass current isolate address.
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(a0, MemOperand(sp, 1 * kPointerSize));
- // Argument 5: static offsets vector buffer.
- __ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate())));
- __ sd(a0, MemOperand(sp, 1 * kPointerSize));
- }
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ li(a7, Operand(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ ld(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ ld(a2, MemOperand(a2, 0));
+ __ daddu(a6, a0, a2);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(a5, zero_reg);
+
+ // Argument 5: static offsets vector buffer.
+ __ li(
+ a4,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
// For arguments 4 and 3 get string length, calculate start of string data
// and calculate the shift of the index (0 for one_byte and 1 for two byte).
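
With the O32 branch gone, the argument setup above relies solely on the n64 convention: the first eight integer arguments travel in a0..a7, so of the nine regexp-entry arguments only the last one, the isolate pointer, is stored to the stack slot above the saved ra. Roughly, and with illustrative parameter names rather than the exact V8 signature, the call shape is:

// Illustrative prototype only; the real callee is generated regexp code.
// Arguments 1-8 land in a0..a7 under n64 and argument 9 spills to the
// stack, matching the sd(a0, MemOperand(sp, 1 * kPointerSize)) above.
extern "C" int RegExpEntrySketch(void* subject,            // a0: argument 1
                                 int previous_index,       // a1: argument 2
                                 const char* input_start,  // a2: argument 3
                                 const char* input_end,    // a3: argument 4
                                 int* offsets_vector,      // a4: argument 5
                                 int num_capture_regs,     // a5: argument 6 (zero here)
                                 void* stack_base,         // a6: argument 7
                                 int direct_call,          // a7: argument 8
                                 void* isolate);           // stack: argument 9
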
@@ -2719,7 +2289,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
__ bind(&call_function);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -2759,7 +2330,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
__ bind(&call);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -3163,8 +2734,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ dsll(a4, a3, 1);
- __ Daddu(a5, a5, a4);
+ __ Dlsa(a5, a5, a3, 1);
// Locate first character of result.
__ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -3291,6 +2861,39 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
+
+
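
The new ToNameStub encodes the ToName dispatch used for property keys: names pass through unchanged, smis and heap numbers share the NumberToString path, oddballs return their cached string, and anything else falls back to Runtime::kToName. A rough C++ rendering of that decision tree; the value model below is a stand-in, not V8's tagged representation:

#include <string>

// Stand-in value model; the real stub dispatches on the instance types of
// tagged pointers, not on this enum.
enum class Kind { kSmi, kName, kHeapNumber, kOddball, kOther };

struct Value {
  Kind kind;
  double number = 0;  // payload for smis / heap numbers
  std::string text;   // the name itself, or an oddball's cached to_string
};

std::string NumberToString(double n) { return std::to_string(n); }
std::string RuntimeToName(const Value&) { return "<Runtime::kToName>"; }

// Mirrors the branch order in ToNameStub::Generate.
std::string ToName(const Value& v) {
  switch (v.kind) {
    case Kind::kSmi:
    case Kind::kHeapNumber: return NumberToString(v.number);
    case Kind::kName:       return v.text;
    case Kind::kOddball:    return v.text;
    default:                return RuntimeToName(v);
  }
}
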
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3463,18 +3066,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
- __ AssertSmi(a1);
- __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
- __ AssertSmi(a0);
- }
- __ Ret(USE_DELAY_SLOT);
- __ Dsubu(v0, a1, a0);
+ if (!Token::IsEqualityOp(op())) {
+ __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ Dsubu(v0, a1, a0);
__ bind(&miss);
GenerateMiss(masm);
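
With strong mode removed, the boolean handler above always converts both oddballs through their cached ToNumber value for relational operators and returns the difference; the CompareIC contract only inspects the sign of v0. A tiny sketch of that convention, assuming the left operand arrives in a1 and the right in a0 as the delay-slot Dsubu suggests:

// Sketch of the value left in v0 for two booleans: negative means less,
// zero means equal, positive means greater.
int CompareBooleanResult(bool lhs, bool rhs) {
  int a = lhs ? 1 : 0;  // Oddball to_number for true / false
  int b = rhs ? 1 : 0;
  return a - b;
}
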
@@ -3572,7 +3171,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3802,8 +3401,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3899,16 +3496,14 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ dsll(at, index, 1);
- __ Daddu(index, index, at); // index *= 3.
+ __ Dlsa(index, index, index, 1); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
- __ dsll(scratch0, index, kPointerSizeLog2);
- __ Daddu(tmp, properties, scratch0);
+ __ Dlsa(tmp, properties, index, kPointerSizeLog2);
__ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
@@ -3997,13 +3592,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
-
- __ dsll(at, scratch2, 1);
- __ Daddu(scratch2, scratch2, at);
+ __ Dlsa(scratch2, scratch2, scratch2, 1);
// Check if the key is identical to the name.
- __ dsll(at, scratch2, kPointerSizeLog2);
- __ Daddu(scratch2, elements, at);
+ __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
__ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
__ Branch(done, eq, name, Operand(at));
}
@@ -4084,14 +3676,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
- __ mov(at, index);
- __ dsll(index, index, 1);
- __ Daddu(index, index, at);
-
+ __ Dlsa(index, index, index, 1);
STATIC_ASSERT(kSmiTagSize == 1);
- __ dsll(index, index, kPointerSizeLog2);
- __ Daddu(index, index, dictionary);
+ __ Dlsa(index, dictionary, index, kPointerSizeLog2);
__ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
@@ -4191,11 +3779,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -5076,8 +4661,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ dsll(at, a0, kPointerSizeLog2);
- __ Daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
@@ -5170,6 +4754,609 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+ __ AssertReceiver(a3);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ GetObjectType(a3, a2, a2);
+ __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it's in fact a map.
+ __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &new_object);
+ __ GetObjectType(a2, a0, a0);
+ __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&new_object, ne, a0, Operand(a1));
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ sd(a2, MemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a3, MemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, MemOperand(v0, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ Daddu(a1, v0, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- v0 : result (untagged)
+ // -- a1 : result fields (untagged)
+ // -- a5 : result end (untagged)
+ // -- a2 : initial map
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+ __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(a1, a5, a0);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ Daddu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ dsll(a4, a4, kPointerSizeLog2);
+ __ Dsubu(a4, a5, a4);
+ __ InitializeFieldsWithFiller(a1, a4, a0);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(a1, a5, a0);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Daddu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(v0, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(v0);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
+ __ SmiTag(a4);
+ __ Push(a2, a4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a2);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Dsubu(v0, v0, Operand(kHeapObjectTag));
+ __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Dlsa(a5, v0, a5, kPointerSizeLog2);
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(a1, a3);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
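
FastNewObjectStub above reproduces in-object slack tracking in hand-written assembly: while the map's construction counter is live, only the used prefix of the new object is filled with undefined, the unused tail gets one-pointer fillers, the counter is decremented, and the instance size is finalized once the counter runs out. A simplified model of that bookkeeping under assumed field names; the authoritative logic lives in Map and the runtime:

#include <vector>

constexpr int kNoSlackTracking = 0;  // mirrors the STATIC_ASSERT in the stub

enum FieldInit { kUndefined, kOnePointerFiller };

struct MapSketch {
  int construction_counter;    // Map::ConstructionCounter bits
  int instance_size_in_words;  // current (possibly over-sized) instance size
  int unused_property_fields;  // slack at the end of the object
};

// Returns the initial field layout for one allocation and updates the
// tracking state, roughly what the slack_tracking block above does.
std::vector<FieldInit> AllocateWithSlackTracking(MapSketch& map) {
  std::vector<FieldInit> fields(map.instance_size_in_words, kUndefined);
  if (map.construction_counter != kNoSlackTracking) {
    // Pad the unused tail so the GC still sees a well-formed object.
    for (int i = map.instance_size_in_words - map.unused_property_fields;
         i < map.instance_size_in_words; ++i) {
      fields[i] = kOnePointerFiller;
    }
    if (--map.construction_counter == kNoSlackTracking) {
      // Counter exhausted: shrink future instances (Runtime::kFinalizeInstanceSize).
      map.instance_size_in_words -= map.unused_property_fields;
      map.unused_property_fields = 0;
    }
  }
  return fields;
}
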
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_rest_parameters, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ SmiLoadUntag(
+ a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Dsubu(a0, a0, Operand(a1));
+ __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the rest parameter array in v0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
+ __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
+ __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
+ __ Move(a1, Smi::FromInt(0));
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ Dlsa(a2, a2, a0, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters
+ // -- a2 : pointer to first rest parameters
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ Dlsa(a1, a1, a0, kPointerSizeLog2);
+ __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Compute arguments.length in a4.
+ __ SmiTag(a4, a0);
+
+ // Setup the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ Dlsa(a1, a3, a0, kPointerSizeLog2);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ ld(at, MemOperand(a2, 0 * kPointerSize));
+ __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Dsubu(a2, a2, Operand(1 * kPointerSize));
+ __ Daddu(a3, a3, Operand(1 * kPointerSize));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Setup the rest parameter array in a3.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
+ __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
+ __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&done_allocate);
+ }
+}
+
+
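
FastNewRestParameterStub above derives the rest array length by subtracting the function's formal parameter count from the actual argument count found in the arguments adaptor frame, taking the empty-array fast path when nothing is left over. Stripped of the frame walking, the arithmetic looks roughly like this, with containers standing in for the caller's argument area and the allocated FixedArray:

#include <cstddef>
#include <vector>

// Illustrative only: the stub reads the actual count out of the adaptor
// frame and the formal count out of the SharedFunctionInfo, then copies
// the tail of the caller's argument area into a fresh backing store.
std::vector<int> CollectRestParameters(const std::vector<int>& args,
                                       std::size_t formal_parameter_count) {
  if (args.size() <= formal_parameter_count) {
    return {};  // "Return an empty rest parameter array."
  }
  // Rest parameters are the arguments past the declared formals.
  return std::vector<int>(
      args.begin() + static_cast<std::ptrdiff_t>(formal_parameter_count),
      args.end());
}

int main() {
  // e.g. function f(a, ...rest) called as f(1, 2, 3) -> rest is [2, 3].
  std::vector<int> rest = CollectRestParameters({1, 2, 3}, 1);
  return rest.size() == 2 ? 0 : 1;
}
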
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lsa(a3, fp, a2, kPointerSizeLog2);
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ SmiTag(a2);
+
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // Registers used over whole function:
+ // a5 : arguments count (tagged)
+ // a6 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a5, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(a6, a2); // In delay slot.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a4, a4, Operand(t2));
+ __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // a5 = argument count (tagged)
+ // a6 = parameter count (tagged)
+ // Compute the mapped parameter count = min(a6, a5) in a6.
+ __ mov(a6, a2);
+ __ Branch(&try_allocate, le, a6, Operand(a5));
+ __ mov(a6, a5);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
+ __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
+ __ SmiScale(t1, a6, kPointerSizeLog2);
+ __ daddiu(t1, t1, kParameterMapHeaderSize);
+ __ bind(&param_map_size);
+
+ // 2. Backing store.
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(t1, t1, Operand(t2));
+ __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into a4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ ld(a4, NativeContextMemOperand());
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
+ __ ld(a4, MemOperand(a4, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
+ __ ld(a4, MemOperand(a4, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (smi-tagged)
+ // a4 = address of arguments map (tagged)
+ // a6 = mapped parameter count (tagged)
+ __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(a1);
+ __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(a5);
+ __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, a4 will point there, otherwise
+ // it will point to the backing store.
+ __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (tagged)
+ // a4 = address of parameter map or backing store (tagged)
+ // a6 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a1, a4);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(a5, a4, Operand(t2));
+ __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(a5, a6);
+ __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Dsubu(t1, t1, Operand(a6));
+ __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a1, a4, Operand(t2));
+ __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
+
+ // a1 = address of backing store (tagged)
+ // a4 = address of parameter map (tagged)
+ // a0 = temporary scratch (a.o., for address calculation)
+ // t1 = loop variable (tagged)
+ // a7 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
+ __ SmiScale(a0, a5, kPointerSizeLog2);
+ __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Daddu(t2, a4, a0);
+ __ sd(t1, MemOperand(t2));
+ __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Daddu(t2, a1, a0);
+ __ sd(a7, MemOperand(t2));
+ __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+
+  // Restore a5 = argument count (tagged).
+ __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // a5 = argument count (tagged)
+ // a6 = mapped parameter count (tagged)
+ // t1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Dsubu(a3, a3, Operand(t2));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Dsubu(a3, a3, Operand(kPointerSize));
+ __ ld(a4, MemOperand(a3, 0));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(t1, a1, Operand(t2));
+ __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, a6, Operand(a5));
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // a5 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(a1, a3, a5);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
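
The parameter-map block in FastNewSloppyArgumentsStub above builds the aliased layout for mapped arguments: each mapped parameter's map slot records a context index (parameters sit in the context in reverse order starting at MIN_CONTEXT_SLOTS), while the corresponding backing-store slot is poisoned with the hole so loads fall through to the context. A hedged sketch of that indexing with plain containers; the constants are placeholders, not the real values:

#include <vector>

constexpr int kMinContextSlots = 4;  // placeholder for Context::MIN_CONTEXT_SLOTS
constexpr int kHole = -1;            // placeholder for the hole value

// Stand-ins for the two arrays the stub allocates in one go.
struct SloppyArgumentsSketch {
  std::vector<int> parameter_map;  // context slot index per mapped parameter
  std::vector<int> backing_store;  // argument values, holes where mapped
};

// Mirrors the parameters_loop: mapped parameter i aliases context slot
// MIN_CONTEXT_SLOTS + parameter_count - 1 - i.  Assumes
// mapped_count == min(parameter_count, args.size()).
SloppyArgumentsSketch BuildMappedArguments(const std::vector<int>& args,
                                           int parameter_count,
                                           int mapped_count) {
  SloppyArgumentsSketch s;
  s.backing_store = args;
  s.parameter_map.resize(mapped_count);
  for (int i = 0; i < mapped_count; ++i) {
    s.parameter_map[i] = kMinContextSlots + parameter_count - 1 - i;
    s.backing_store[i] = kHole;
  }
  return s;
}
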
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&arguments_adaptor, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ {
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Dlsa(a2, a2, a0, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ Branch(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ SmiLoadUntag(
+ a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Dlsa(a2, a3, a0, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters
+ // -- a2 : pointer to first rest parameters
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Dlsa(a1, a1, a0, kPointerSizeLog2);
+ __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Compute arguments.length in a4.
+ __ SmiTag(a4, a0);
+
+ // Setup the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ Dlsa(a1, a3, a0, kPointerSizeLog2);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ ld(at, MemOperand(a2, 0 * kPointerSize));
+ __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Dsubu(a2, a2, Operand(1 * kPointerSize));
+ __ Daddu(a3, a3, Operand(1 * kPointerSize));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Setup the strict arguments object in a3.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
+ __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
+ __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
+ __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&done_allocate);
+}
+
+
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = cp;
Register slot_reg = a2;
@@ -5183,8 +5370,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell value at the specified slot.
- __ dsll(at, slot_reg, kPointerSizeLog2);
- __ Daddu(at, at, Operand(context_reg));
+ __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ ld(result_reg, ContextMemOperand(at, 0));
__ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
@@ -5222,8 +5408,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell at the specified slot.
- __ dsll(at, slot_reg, kPointerSizeLog2);
- __ Daddu(at, at, Operand(context_reg));
+ __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ ld(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
@@ -5451,11 +5636,10 @@ static void CallApiFunctionAndReturn(
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- a4 : call_data
@@ -5491,8 +5675,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// Save context, callee and call data.
__ Push(context, callee, call_data);
- // Load context from callee.
- __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // Load context from callee.
+ __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
Register scratch = call_data;
if (!call_data_undefined) {
@@ -5577,7 +5763,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5585,41 +5771,49 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- a2 : api_function_address
+ // -- a2 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
DCHECK(api_function_address.is(a2));
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // a1 (internal::Object** args_) as the data.
+  // Create a v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
__ sd(a1, MemOperand(sp, 1 * kPointerSize));
- __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ __ Daddu(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kInvalidStackOffset,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ return_value_operand, NULL);
}
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 022426e7d7..c8cde97883 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -1078,8 +1078,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ And(at, result, Operand(kStringEncodingMask));
__ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
- __ dsll(at, index, 1);
- __ Daddu(at, string, at);
+ __ Dlsa(at, string, index, 1);
__ lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&one_byte);
@@ -1151,8 +1150,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// Must not call ExpConstant() after overwriting temp3!
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ dsll(at, temp2, 3);
- __ Daddu(temp3, temp3, Operand(at));
+ __ Dlsa(temp3, temp3, temp2, 3);
__ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
__ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
  // The first word loaded is the lower number register.
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 226e3ed5ba..57e947b138 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -45,27 +45,6 @@ enum ArchVariants {
#error Unknown endianness
#endif
-// TODO(plind): consider deriving ABI from compiler flags or build system.
-
-// ABI-dependent definitions are made with #define in simulator-mips64.h,
-// so the ABI choice must be available to the pre-processor. However, in all
-// other cases, we should use the enum AbiVariants with normal if statements.
-
-#define MIPS_ABI_N64 1
-// #define MIPS_ABI_O32 1
-
-// The only supported Abi's are O32, and n64.
-enum AbiVariants {
- kO32,
- kN64 // Use upper case N for 'n64' ABI to conform to style standard.
-};
-
-#ifdef MIPS_ABI_N64
-static const AbiVariants kMipsAbi = kN64;
-#else
-static const AbiVariants kMipsAbi = kO32;
-#endif
-
// TODO(plind): consider renaming these ...
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -840,6 +819,7 @@ enum CheckForInexactConversion {
kDontCheckForInexactConversion
};
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// -----------------------------------------------------------------------------
// Hints.
@@ -1184,7 +1164,7 @@ class Instruction {
 // Various MIPS assembly constants.
 // Number of C/C++ argument slots.
-const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
+const int kCArgSlotCount = 0;
// TODO(plind): below should be based on kPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
@@ -1226,6 +1206,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
case SPECIAL3:
switch (FunctionFieldRaw()) {
case INS:
+ case DINS:
case EXT:
case DEXT:
case DEXTM:
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 8daba04ac7..ec610f0281 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -80,27 +80,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -119,8 +98,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on MIPS in the input frame.
return false;
}
@@ -188,15 +166,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
- if (kMipsAbi == kN64) {
- // a4: already has fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
- } else { // O32 abi.
- // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
- __ sd(a4, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(a5, CFunctionArgumentOperand(6)); // Isolate.
- }
+ // a4: already has fp-to-sp delta.
+ __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
@@ -273,8 +245,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
- __ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
+ __ Dlsa(a1, a4, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index c5c1311d94..73df66ea8e 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -54,20 +54,6 @@ const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
-const Register ArgumentsAccessReadDescriptor::index() { return a1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return a1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return a2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return a4; }
-
-
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -96,6 +82,32 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -113,6 +125,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return a0; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
@@ -165,13 +181,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a3};
@@ -407,6 +416,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -418,7 +435,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -430,7 +446,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 7b73ac74e4..b49fa76e06 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -161,9 +161,9 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch) {
DCHECK(cc == eq || cc == ne);
- And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- Branch(branch, cc, scratch,
- Operand(ExternalReference::new_space_start(isolate())));
+ const int mask =
+ 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
+ CheckPageFlag(object, scratch, mask, cc, branch);
}
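
A one-line restatement of what the rewritten InNewSpace check does; page_flags here stands for the flag word CheckPageFlag reads from the object's MemoryChunk header (illustrative, not part of the patch):

// An object is in new space iff its page is flagged as from-space or to-space.
const int kNewSpaceMask =
    (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
bool in_new_space = (page_flags & kNewSpaceMask) != 0;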
@@ -371,6 +371,67 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(a1));
+ DCHECK(code_entry.is(a4));
+ DCHECK(scratch.is(a5));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
+ ld(at, MemOperand(scratch));
+ Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
+ Operand(code_entry));
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | ra.bit());
+
+ int argument_count = 3;
+
+ PrepareCallCFunction(argument_count, code_entry);
+
+ Move(a0, js_function);
+ Move(a1, dst);
+ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ MultiPop(kJSCallerSaved | ra.bit());
+
+ bind(&done);
+}
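
The two CheckPageFlag tests above implement the usual write-barrier fast path. Restated in plain C++ for reference (value_page_flags/object_page_flags are hypothetical stand-ins for the MemoryChunk flag words of the code entry's and the JSFunction's pages):

// The slow path (the C call to the record-write function) runs only if both
// pages are interesting: the code entry's page tracks incoming pointers and
// the JSFunction's page tracks outgoing pointers.
bool needs_slow_path =
    (value_page_flags & MemoryChunk::kPointersToHereAreInterestingMask) != 0 &&
    (object_page_flags & MemoryChunk::kPointersFromHereAreInterestingMask) != 0;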
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
@@ -503,16 +564,14 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = ~hash + (hash << 15);
// The algorithm uses 32-bit integer values.
nor(scratch, reg0, zero_reg);
- sll(at, reg0, 15);
- addu(reg0, scratch, at);
+ Lsa(reg0, scratch, reg0, 15);
// hash = hash ^ (hash >> 12);
srl(at, reg0, 12);
xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
- sll(at, reg0, 2);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 2);
// hash = hash ^ (hash >> 4);
srl(at, reg0, 4);
@@ -520,8 +579,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = hash * 2057;
sll(scratch, reg0, 11);
- sll(at, reg0, 3);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 3);
addu(reg0, reg0, scratch);
// hash = hash ^ (hash >> 16);
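
The comments spell out the integer hash the Lsa-based sequence computes; the same steps in plain C++, as a cross-check (32-bit arithmetic throughout, matching the note above):

#include <cstdint>

// Scalar version of the hash emitted by GetNumberHash.
uint32_t NumberHash(uint32_t hash) {
  hash = ~hash + (hash << 15);  // nor + Lsa
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // Lsa
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}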
@@ -581,12 +639,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
- dsll(at, reg2, 1); // 2x.
- daddu(reg2, reg2, at); // reg2 = reg2 * 3.
+ Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
- dsll(at, reg2, kPointerSizeLog2);
- daddu(reg2, elements, at);
+ Dlsa(reg2, elements, reg2, kPointerSizeLog2);
ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
@@ -1302,6 +1358,35 @@ void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
}
}
+static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
+ if ((imm >> (bitnum - 1)) & 0x1) {
+ imm = (imm >> bitnum) + 1;
+ } else {
+ imm = imm >> bitnum;
+ }
+ return imm;
+}
+
+bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
+ bool higher_bits_sign_extended = false;
+ if (is_int16(j.imm64_)) {
+ daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
+ } else if (!(j.imm64_ & kHiMask)) {
+ ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
+ } else if (!(j.imm64_ & kImm16Mask)) {
+ lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+ if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
+ higher_bits_sign_extended = true;
+ }
+ } else {
+ lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ & kImm16Mask));
+ if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
+ higher_bits_sign_extended = true;
+ }
+ }
+ return higher_bits_sign_extended;
+}
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
@@ -1309,50 +1394,57 @@ void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int32(j.imm64_)) {
- if (is_int16(j.imm64_)) {
- daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
- } else if (!(j.imm64_ & kHiMask)) {
- ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
- } else if (!(j.imm64_ & kImm16Mask)) {
- lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
- } else {
- lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm64_ & kImm16Mask));
- }
+ LiLower32BitHelper(rd, j);
} else {
- if (is_int48(j.imm64_)) {
- if ((j.imm64_ >> 32) & kImm16Mask) {
- lui(rd, (j.imm64_ >> 32) & kImm16Mask);
- if ((j.imm64_ >> 16) & kImm16Mask) {
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
- }
- } else {
- ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+ if (kArchVariant == kMips64r6) {
+ int64_t imm = j.imm64_;
+ bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
+ imm = ShiftAndFixSignExtension(imm, 32);
+ // If LUI writes 1s to higher bits, we need both DAHI/DATI.
+ if ((imm & kImm16Mask) ||
+ (higher_bits_sign_extended && (j.imm64_ > 0))) {
+ dahi(rd, imm & kImm16Mask);
}
- dsll(rd, rd, 16);
- if (j.imm64_ & kImm16Mask) {
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ imm = ShiftAndFixSignExtension(imm, 16);
+ if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
+ (higher_bits_sign_extended && (j.imm64_ > 0))) {
+ dati(rd, imm & kImm16Mask);
}
} else {
- lui(rd, (j.imm64_ >> 48) & kImm16Mask);
- if ((j.imm64_ >> 32) & kImm16Mask) {
- ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
- }
- if ((j.imm64_ >> 16) & kImm16Mask) {
+ if (is_int48(j.imm64_)) {
+ if ((j.imm64_ >> 32) & kImm16Mask) {
+ lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+ if ((j.imm64_ >> 16) & kImm16Mask) {
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ }
+ } else {
+ ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+ }
dsll(rd, rd, 16);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
if (j.imm64_ & kImm16Mask) {
- dsll(rd, rd, 16);
ori(rd, rd, j.imm64_ & kImm16Mask);
- } else {
- dsll(rd, rd, 16);
}
} else {
- if (j.imm64_ & kImm16Mask) {
- dsll32(rd, rd, 0);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+ if ((j.imm64_ >> 32) & kImm16Mask) {
+ ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ }
+ if ((j.imm64_ >> 16) & kImm16Mask) {
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ if (j.imm64_ & kImm16Mask) {
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ } else {
+ dsll(rd, rd, 16);
+ }
} else {
- dsll32(rd, rd, 0);
+ if (j.imm64_ & kImm16Mask) {
+ dsll32(rd, rd, 0);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ } else {
+ dsll32(rd, rd, 0);
+ }
}
}
}
@@ -1371,12 +1463,32 @@ void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
dsll(rd, rd, 16);
ori(rd, rd, j.imm64_ & kImm16Mask);
} else {
- lui(rd, (j.imm64_ >> 48) & kImm16Mask);
- ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
- dsll(rd, rd, 16);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
- dsll(rd, rd, 16);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ if (kArchVariant == kMips64r6) {
+ int64_t imm = j.imm64_;
+ lui(rd, (imm >> kLuiShift) & kImm16Mask);
+ if (imm & kImm16Mask) {
+ ori(rd, rd, (imm & kImm16Mask));
+ }
+ if ((imm >> 31) & 0x1) {
+ imm = (imm >> 32) + 1;
+ } else {
+ imm = imm >> 32;
+ }
+ dahi(rd, imm & kImm16Mask);
+ if ((imm >> 15) & 0x1) {
+ imm = (imm >> 16) + 1;
+ } else {
+ imm = imm >> 16;
+ }
+ dati(rd, imm & kImm16Mask);
+ } else {
+ lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ }
}
}
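
On r6 the 64-bit immediate is built from lui/ori for the low 32 bits plus dahi/dati for bits 32-47 and 48-63, with ShiftAndFixSignExtension compensating for the sign extension of the lower half. A small host-side check of that adjustment, assuming dahi adds sign_extend(imm16) << 32 and dati adds sign_extend(imm16) << 48 (the constant and names are illustrative):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t want = 0x0123456789ABCDEFULL;
  // lui/ori leave the sign-extended low 32 bits in the register.
  int64_t rd = static_cast<int32_t>(want & 0xFFFFFFFF);  // 0xFFFFFFFF89ABCDEF
  // dahi with 0x4567 + 1: the +1 cancels the 0xFFFFFFFF from sign extension.
  rd += static_cast<int64_t>(static_cast<int16_t>(0x4568)) << 32;
  // dati with the untouched top 16 bits.
  rd += static_cast<int64_t>(static_cast<int16_t>(0x0123)) << 48;
  assert(static_cast<uint64_t>(rd) == want);
  return 0;
}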
@@ -1596,6 +1708,22 @@ void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
bind(&conversion_done);
}
+void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ mfc1(t8, fs);
+ Cvt_s_uw(fd, t8);
+}
+
+void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ // Zero extend int32 in rs.
+ Dext(t9, rs, 0, 32);
+ dmtc1(t9, fd);
+ cvt_s_l(fd, fd);
+}
void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
@@ -1672,6 +1800,12 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch) {
+ Trunc_uw_s(fs, t8, scratch);
+ mtc1(t8, fd);
+}
+
void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
Trunc_ul_d(fs, t8, scratch, result);
@@ -1738,6 +1872,35 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
bind(&done);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+ FPURegister scratch) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
+
+ // Load 2^31 into scratch as its float representation.
+ li(at, 0x4F000000);
+ mtc1(at, scratch);
+ // Test if scratch > fd.
+ // If fd < 2^31 we can convert it normally.
+ Label simple_convert;
+ BranchF32(&simple_convert, NULL, lt, fd, scratch);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_w_s(scratch, scratch);
+ mfc1(rs, scratch);
+ Or(rs, rs, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_s(scratch, fd);
+ mfc1(rs, scratch);
+
+ bind(&done);
+}
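
Trunc_uw_s works around trunc_w_s handling only the signed int32 range; the equivalent scalar logic, for reference (illustrative C++, inputs assumed finite and in [0, 2^32)):

#include <cstdint>

// Values below 2^31 are truncated directly; larger values are biased down by
// 2^31, truncated, and bit 31 is OR-ed back in.
uint32_t TruncUwS(float value) {
  const float two_31 = 2147483648.0f;  // 0x4F000000 as a float bit pattern
  if (value < two_31) {
    return static_cast<uint32_t>(static_cast<int32_t>(value));
  }
  return static_cast<uint32_t>(static_cast<int32_t>(value - two_31)) | (1u << 31);
}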
void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
@@ -3714,7 +3877,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!AreAliased(result, scratch1, scratch2, t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3792,8 +3955,8 @@ void MacroAssembler::Allocate(Register object_size, Register result,
}
// |object_size| and |result_end| may overlap, other registers must not.
- DCHECK(!AreAliased(object_size, result, scratch, t9));
- DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+ DCHECK(!AreAliased(result_end, result, scratch, t9, at));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3839,8 +4002,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- dsll(result_end, object_size, kPointerSizeLog2);
- Daddu(result_end, result, result_end);
+ Dlsa(result_end, result, object_size, kPointerSizeLog2);
} else {
Daddu(result_end, result, Operand(object_size));
}
@@ -4365,7 +4527,7 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
- const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+ const DoubleRegister fparg2 = f13;
if (src2.is(f12)) {
DCHECK(!src1.is(fparg2));
Move(fparg2, src2);
@@ -4479,7 +4641,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -5230,18 +5392,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, a1);
- InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -5338,9 +5488,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -5596,8 +5746,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
daddu(sp, sp, argument_count);
} else {
- dsll(t8, argument_count, kPointerSizeLog2);
- daddu(sp, sp, t8);
+ Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
}
}
@@ -5880,6 +6029,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5969,8 +6129,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}
-
-static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+static const int kRegisterPassedArguments = 8;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
@@ -6185,8 +6344,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- dsll(t8, t8, Bitmap::kBytesPerCellLog2);
- Daddu(bitmap_reg, bitmap_reg, t8);
+ Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
li(t8, Operand(1));
dsllv(mask_reg, t8, mask_reg);
}
@@ -6251,7 +6409,8 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = a5;
Register empty_fixed_array_value = a6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -6265,6 +6424,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Branch(
call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
jmp(&start);
bind(&next);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 31ed8a32e1..7f44ab9cc5 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -15,6 +15,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_v0};
const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@@ -235,6 +236,11 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+  // functor/function with the declaration 'Label* func(size_t index)'.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
@@ -385,7 +391,7 @@ class MacroAssembler: public Assembler {
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
@@ -393,7 +399,7 @@ class MacroAssembler: public Assembler {
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -455,6 +461,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
@@ -688,6 +699,7 @@ class MacroAssembler: public Assembler {
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline bool LiLower32BitHelper(Register rd, Operand j);
inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
@@ -821,6 +833,10 @@ class MacroAssembler: public Assembler {
void Cvt_d_ul(FPURegister fd, FPURegister fs);
void Cvt_d_ul(FPURegister fd, Register rs);
+ // Convert unsigned word to float.
+ void Cvt_s_uw(FPURegister fd, FPURegister fs);
+ void Cvt_s_uw(FPURegister fd, Register rs);
+
// Convert unsigned long to float.
void Cvt_s_ul(FPURegister fd, FPURegister fs);
void Cvt_s_ul(FPURegister fd, Register rs);
@@ -837,6 +853,10 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
// Convert double to unsigned long.
void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
Register result = no_reg);
@@ -1121,6 +1141,11 @@ class MacroAssembler: public Assembler {
Register map,
Register type_reg);
+ void GetInstanceType(Register object_map, Register object_instance_type) {
+ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ }
+
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
@@ -1449,10 +1474,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1644,6 +1665,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1747,7 +1771,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
@@ -1836,9 +1860,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
+ void InNewSpace(Register object, Register scratch,
+ Condition cond, // ne for new space, eq otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
@@ -1901,7 +1924,36 @@ class CodePatcher {
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
-
+template <typename Func>
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+  // Ensure that dd-ed labels following this instruction use 8-byte aligned
+  // addresses.
+ if (kArchVariant >= kMips64r6) {
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
+    // Opposite of Align(8) as we have an odd number of instructions here.
+ if ((pc_offset() & 7) == 0) {
+ nop();
+ }
+ addiupc(at, 5);
+ dlsa(at, at, index, kPointerSizeLog2);
+ ld(at, MemOperand(at));
+ } else {
+ Label here;
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
+ Align(8);
+ bal(&here);
+ dsll(at, index, kPointerSizeLog2); // Branch delay slot.
+ bind(&here);
+ daddu(at, at, ra);
+ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ }
+ jr(at);
+ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ dd(GetLabelFunction(index));
+ }
+}
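
A hypothetical call site for GenerateSwitchTable, showing the shape the GetLabelFunction comment in the header asks for (register, label names and case bodies are illustrative):

// Dispatch on the value in a0 over three cases.
Label case0, case1, case2, done;
Label* labels[] = {&case0, &case1, &case2};
__ GenerateSwitchTable(a0, 3, [&labels](size_t i) { return labels[i]; });
__ bind(&case0); /* ... */ __ Branch(&done);
__ bind(&case1); /* ... */ __ Branch(&done);
__ bind(&case2); /* ... */
__ bind(&done);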
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 7fa96442f9..70c06c885f 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -16,6 +16,7 @@
#include "src/mips64/constants-mips64.h"
#include "src/mips64/simulator-mips64.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
// Only build the simulator if not compiling for real MIPS hardware.
#if defined(USE_SIMULATOR)
@@ -520,7 +521,8 @@ void MipsDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", static_cast<int>(value >> 32));
@@ -1159,7 +1161,7 @@ double Simulator::get_fpu_register_double(int fpureg) const {
// from a0-a3 or f12 and f13 (n64), or f14 (O32).
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (!IsMipsSoftFloatABI) {
- const int fparg2 = (kMipsAbi == kN64) ? 13 : 14;
+ const int fparg2 = 13;
*x = get_fpu_register_double(12);
*y = get_fpu_register_double(fparg2);
*z = static_cast<int32_t>(get_register(a2));
@@ -1964,11 +1966,6 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-struct ObjectPair {
- Object* x;
- Object* y;
-};
-
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
int64_t arg1,
int64_t arg2,
@@ -1976,6 +1973,9 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
int64_t arg4,
int64_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4);
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -2010,15 +2010,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int64_t arg3 = get_register(a3);
int64_t arg4, arg5;
- if (kMipsAbi == kN64) {
- arg4 = get_register(a4); // Abi n64 register a4.
- arg5 = get_register(a5); // Abi n64 register a5.
- } else { // Abi O32.
- int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
- // Args 4 and 5 are on the stack after the reserved space for args 0..3.
- arg4 = stack_pointer[4];
- arg5 = stack_pointer[5];
- }
+ arg4 = get_register(a4); // Abi n64 register a4.
+ arg5 = get_register(a5); // Abi n64 register a5.
+
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -2175,7 +2169,30 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ // builtin call returning ObjectTriple.
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host triple returning runtime function %p "
+ "args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
+ ", %016" PRIx64 "\n",
+ FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ }
+ // arg0 is a hidden argument pointing to the return location, so don't
+ // pass it to the target function.
+ ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ }
+      // The return value is passed back via the hidden first argument.
+ ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+ *sim_result = result;
+ set_register(v0, arg0);
} else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
@@ -2316,6 +2333,89 @@ void Simulator::SignalException(Exception e) {
static_cast<int>(e));
}
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ result = a;
+ } else if (std::isnan(a)) {
+ result = b;
+ } else if (std::isnan(b)) {
+ result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+    // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
+    // negates the result.
+ result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
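
A few host-side sanity checks of the helpers above, matching the r6 MIN/MAX/MINA/MAXA semantics the replaced inline code implemented (assumes the file-static templates are visible at the call site):

#include <cassert>
#include <cmath>

void CheckFpuMinMaxHelpers() {
  assert(std::signbit(FPUMin(-0.0, 0.0)));   // min of +/-0 is -0
  assert(!std::signbit(FPUMax(-0.0, 0.0)));  // max of +/-0 is +0
  double nan = std::nan("");
  assert(FPUMin(nan, 1.0) == 1.0);           // the non-NaN operand wins
  assert(std::isnan(FPUMin(nan, nan)));
  assert(FPUMaxA(-3.0, 2.0) == -3.0);        // MAXA/MINA compare by magnitude
}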
// Handle execution based on instruction types.
@@ -2600,71 +2700,19 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
break;
case SEL:
DCHECK(kArchVariant == kMips64r6);
@@ -2809,71 +2857,19 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
- }
+ set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
break;
case ADD_D:
set_fpu_register_double(fd_reg(), fs + ft);
@@ -4777,7 +4773,7 @@ void Simulator::CallInternal(byte* entry) {
int64_t Simulator::Call(byte* entry, int argument_count, ...) {
- const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+ const int kRegisterPassedArguments = 8;
va_list parameters;
va_start(parameters, argument_count);
// Set up arguments.
@@ -4789,14 +4785,12 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(a2, va_arg(parameters, int64_t));
set_register(a3, va_arg(parameters, int64_t));
- if (kMipsAbi == kN64) {
- // Up to eight arguments passed in registers in N64 ABI.
- // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
- if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
- if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
- if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
- if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
- }
+ // Up to eight arguments passed in registers in N64 ABI.
+ // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
+ if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
+ if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
+ if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
+ if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
// Remaining arguments passed on stack.
int64_t original_stack = get_register(sp);
@@ -4831,7 +4825,7 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
double Simulator::CallFP(byte* entry, double d0, double d1) {
if (!IsMipsSoftFloatABI) {
- const FPURegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+ const FPURegister fparg2 = f13;
set_fpu_register_double(f12, d0);
set_fpu_register_double(fparg2, d1);
} else {
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 1d156d860f..7f60a74639 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -31,7 +31,6 @@ namespace internal {
// should act as a function matching the type arm_regexp_matcher.
// The fifth (or ninth) argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#ifdef MIPS_ABI_N64
typedef int (*mips_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
@@ -48,26 +47,6 @@ typedef int (*mips_regexp_matcher)(String* input,
(FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
-#else // O32 Abi.
-
-typedef int (*mips_regexp_matcher)(String* input,
- int32_t start_offset,
- const byte* input_start,
- const byte* input_end,
- void* return_address,
- int* output,
- int32_t output_size,
- Address stack_base,
- int32_t direct_call,
- Isolate* isolate);
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
- p7, p8))
-
-#endif // MIPS_ABI_N64
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -516,18 +495,11 @@ class Simulator {
reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
-#ifdef MIPS_ABI_N64
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->Call( \
entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
NULL, p8))
-#else // Must be O32 Abi.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- static_cast<int>(Simulator::current(isolate)->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-#endif // MIPS_ABI_N64
// The simulator has its own stack. Thus it has a different stack limit from