author     Michaël Zasso <targos@protonmail.com>  2017-09-12 11:34:59 +0200
committer  Anna Henningsen <anna@addaleax.net>    2017-09-13 16:15:18 +0200
commit     d82e1075dbc2cec2d6598ade10c1f43805f690fd (patch)
tree       ccd242b9b491dfc341d1099fe11b0ef528839877 /deps/v8/src/builtins/s390
parent     b4b7ac6ae811b2b5a3082468115dfb5a5246fe3f (diff)
download   node-new-d82e1075dbc2cec2d6598ade10c1f43805f690fd.tar.gz
deps: update V8 to 6.1.534.36
PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/builtins/s390')
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc  1017
1 file changed, 346 insertions(+), 671 deletions(-)
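For readers skimming the diff below: the central change is that the s390 builtins now read the feedback vector's optimized-code slot, which holds either a Smi optimization marker or a WeakCell with optimized code, and dispatch on it through the new MaybeTailCallOptimizedCodeSlot helper (used by the interpreter entry trampoline, CompileLazy and the new CheckOptimizationMarker builtin). The following sketch restates that dispatch in plain C++. It is illustrative only, not V8 source; every type and function name in it (OptimizedCodeSlot, CallRuntime, InstallAndJump, StackLimitHit) is a hypothetical stand-in for the code paths the generated assembly takes.

// Illustrative sketch (not V8 source) of the control flow that the new
// MaybeTailCallOptimizedCodeSlot builtin emits as s390 assembly below.
#include <cstdio>

enum class OptimizationMarker {
  kNone,
  kCompileOptimized,
  kCompileOptimizedConcurrent,
  kInOptimizationQueue
};

struct Code {
  bool marked_for_deoptimization;
};

// Models the optimized-code slot: either a Smi marker or a (possibly
// cleared) WeakCell holding optimized code.
struct OptimizedCodeSlot {
  bool holds_marker;
  OptimizationMarker marker;
  Code* weak_code;  // nullptr models a cleared WeakCell
};

// Stand-ins for the tail calls made by the real builtin.
static void CallRuntime(const char* function_id) {
  std::printf("tail call runtime: %s\n", function_id);
}
static void InstallAndJump(Code*) {
  std::printf("self-heal closure, jump to optimized code\n");
}
static bool StackLimitHit() { return true; }

static void MaybeTailCallOptimizedCodeSlot(const OptimizedCodeSlot& slot) {
  if (slot.holds_marker) {
    // The slot is a Smi optimization marker: maybe request compilation.
    switch (slot.marker) {
      case OptimizationMarker::kNone:
        break;  // fall through to the interpreter entry
      case OptimizationMarker::kCompileOptimized:
        CallRuntime("CompileOptimized_NotConcurrent");
        return;
      case OptimizationMarker::kCompileOptimizedConcurrent:
        CallRuntime("CompileOptimized_Concurrent");
        return;
      case OptimizationMarker::kInOptimizationQueue:
        // Poll for queued code only when the stack limit signals an
        // interrupt; checking on every entry would be too expensive.
        if (StackLimitHit()) {
          CallRuntime("TryInstallOptimizedCode");
          return;
        }
        break;
    }
  } else if (slot.weak_code != nullptr) {
    // The slot is a WeakCell holding optimized code.
    if (slot.weak_code->marked_for_deoptimization) {
      CallRuntime("EvictOptimizedCodeSlot");  // clear the stale entry
      return;
    }
    InstallAndJump(slot.weak_code);
    return;
  }
  // Cleared cell or kNone marker: continue into the interpreter trampoline.
}

int main() {
  Code optimized{/*marked_for_deoptimization=*/false};
  OptimizedCodeSlot slot{/*holds_marker=*/false, OptimizationMarker::kNone,
                         &optimized};
  MaybeTailCallOptimizedCodeSlot(slot);  // prints the self-heal path
}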
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 2148f11105..f6bd0af3bf 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -226,7 +226,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -376,7 +376,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
@@ -424,22 +424,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
- // Checking whether the queued function is ready for install is optional,
- // since we come across interrupts and stack checks elsewhere. However,
- // not checking may delay installing ready functions, and always checking
- // would be quite expensive. A good compromise is to first check against
- // stack limit as a cue for an interrupt signal.
- Label ok;
- __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
- __ bge(&ok, Label::kNear);
-
- GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
-
- __ bind(&ok);
- GenerateTailCallToSharedCode(masm);
-}
-
namespace {
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -543,16 +527,13 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6,
- FunctionKind::kDerivedConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kDerivedConstructorMask, r0);
__ bne(&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
r6, r7);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ __ Call(masm->isolate()->builtins()->FastNewObject(),
RelocInfo::CODE_TARGET);
__ b(&post_instantiation_deopt_entry);
@@ -669,10 +650,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6,
- FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
__ beq(&use_receiver);
} else {
__ b(&use_receiver);
@@ -726,35 +704,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- r2 : the value to pass to the generator
// -- r3 : the JSGeneratorObject to resume
// -- r4 : the resume mode (tagged)
- // -- r5 : the SuspendFlags of the earlier suspend call (tagged)
// -- lr : return address
// -----------------------------------
- __ SmiUntag(r5);
- __ AssertGeneratorObject(r3, r5);
+ __ AssertGeneratorObject(r3);
// Store input value into generator object.
- Label async_await, done_store_input;
-
- __ tmll(r5, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ b(Condition(1), &async_await);
-
__ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&async_await);
- __ StoreP(
- r2,
- FieldMemOperand(r3, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset),
- r0);
- __ RecordWriteField(r3, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- r2, r5, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ b(&done_store_input);
-
- __ bind(&done_store_input);
- // `r5` no longer holds SuspendFlags
// Store resume mode into generator object.
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
@@ -811,7 +769,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CmpP(r2, Operand::Zero());
__ beq(&done_loop);
#else
- __ SmiUntag(r2);
__ LoadAndTestP(r2, r2);
__ beq(&done_loop);
#endif
@@ -913,7 +870,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
- ExternalReference context_address(Isolate::kContextAddress,
+ ExternalReference context_address(IsolateAddressId::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
@@ -1036,6 +993,118 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ AddP(sp, sp, args_count);
}
+// Tail-call |function_id| if |smi_entry| == |marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register smi_entry,
+ OptimizationMarker marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ bne(&no_match);
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register feedback_vector,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee if needed, and caller)
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -----------------------------------
+ DCHECK(
+ !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+
+ Label optimized_code_slot_is_cell, fallthrough;
+
+ Register closure = r3;
+ Register optimized_code_entry = scratch1;
+
+ const int kOptimizedCodeCellOffset =
+ FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize;
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+
+ // Check if the code entry is a Smi. If yes, we interpret it as an
+ // optimisation marker. Otherwise, interpret it as a weak cell to a code
+ // object.
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+
+ {
+ // Optimized code slot is a Smi optimization marker.
+
+ // Fall through if no optimization trigger.
+ __ CmpSmiLiteral(optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kNone), r0);
+ __ beq(&fallthrough);
+
+ TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(
+ masm, optimized_code_entry,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ {
+ // Otherwise, the marker is InOptimizationQueue.
+ if (FLAG_debug_code) {
+ __ CmpSmiLiteral(
+ optimized_code_entry,
+ Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
+ __ Assert(eq, kExpectedOptimizationSentinel);
+ }
+
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // stack limit as a cue for an interrupt signal.
+ __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
+ __ bge(&fallthrough, Label::kNear);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
+ }
+ }
+
+ {
+ // Optimized code slot is a WeakCell.
+ __ bind(&optimized_code_slot_is_cell);
+
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(optimized_code_entry, &fallthrough);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ Label found_deoptimized_code;
+ __ LoadW(scratch2, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&found_deoptimized_code);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, closure,
+ scratch2, scratch3, feedback_vector);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code slot contains deoptimized code, evict it and re-enter the
+ // closure's code.
+ __ bind(&found_deoptimized_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ }
+
+ // Fall-through if the optimized code cell is clear and there is no
+ // optimization marker.
+ __ bind(&fallthrough);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -1055,43 +1124,35 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ Register closure = r3;
+ Register feedback_vector = r4;
+
+ // Load the feedback vector from the closure.
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ // Read off the optimized code slot in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r3);
-
- // First check if there is optimized code in the feedback vector which we
- // could call instead.
- Label switch_to_optimized_code;
-
- Register optimized_code_entry = r6;
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
- __ LoadP(
- optimized_code_entry,
- FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+ __ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- Label array_done;
- Register debug_info = r4;
- DCHECK(!debug_info.is(r2));
- __ LoadP(debug_info,
- FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
- __ TestIfSmi(debug_info);
- __ beq(&array_done);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
- __ bind(&array_done);
+ __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
+ __ TestIfSmi(r6);
+ __ bne(&maybe_load_debug_bytecode_array);
+ __ bind(&bytecode_array_loaded);
// Check whether we should continue to use the interpreter.
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
@@ -1102,15 +1163,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&switch_to_different_code_kind);
// Increment invocation count for the function.
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
- __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
- __ LoadP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ LoadP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
- __ StoreP(r1, FieldMemOperand(
- r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
- FeedbackVector::kHeaderSize));
+ __ StoreP(
+ r1, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
@@ -1184,40 +1245,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r4);
__ Ret();
+ // Load debug copy of the bytecode array if it exists.
+ // kInterpreterBytecodeArrayRegister is already loaded with
+ // SharedFunctionInfo::kFunctionDataOffset.
+ Label done;
+ __ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ip);
+ __ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
+ __ beq(&done);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
+ __ bind(&done);
+ __ b(&bytecode_array_loaded);
+
// If the shared code is no longer this entry trampoline, then the underlying
// function has been switched to a different kind of code and we heal the
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
__ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(r3, r6, r7);
+ __ StoreP(r6, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, r6, r7);
__ JumpToJSEntry(r6);
-
- // If there is optimized code on the type feedback vector, check if it is good
- // to run, and if so, self heal the closure and call the optimized code.
- __ bind(&switch_to_optimized_code);
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- Label gotta_call_runtime;
-
- // Check if the optimized code is marked for deopt.
- __ LoadlW(r7, FieldMemOperand(optimized_code_entry,
- Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Optimized code is good, get it into the closure and link the closure into
- // the optimized functions list, then tail call the optimized code.
- ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
- r4);
- __ JumpToJSEntry(optimized_code_entry);
-
- // Optimized code is marked for deopt, bailout to the CompileLazy runtime
- // function which will clear the feedback vector's optimized code slot.
- __ bind(&gotta_call_runtime);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1253,7 +1305,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
- TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+ InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1275,18 +1327,21 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Push the arguments.
Generate_InterpreterPushArgs(masm, r5, r4, r5, r6);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r4); // Pass the spread in a register
+ __ SubP(r2, r2, Operand(1)); // Subtract one for spread
+ }
// Call the target.
if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ Jump(
+ masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
RelocInfo::CODE_TARGET);
}
@@ -1322,7 +1377,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Generate_InterpreterPushArgs(masm, r2, r6, r2, r7);
__ bind(&skip);
- __ AssertUndefinedOrAllocationSite(r4, r7);
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Pop(r4); // Pass the spread in a register
+ __ SubP(r2, r2, Operand(1)); // Subtract one for spread
+ } else {
+ __ AssertUndefinedOrAllocationSite(r4, r7);
+ }
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r3);
@@ -1451,6 +1511,34 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
+void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r5 : new target (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -----------------------------------
+ Register closure = r3;
+
+ // Get the feedback vector.
+ Register feedback_vector = r4;
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ // The feedback vector must be defined.
+ if (FLAG_debug_code) {
+ __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
+ __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+ }
+
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
+
+ // Otherwise, tail call the SFI code.
+ GenerateTailCallToSharedCode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1459,43 +1547,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label try_shared;
Register closure = r3;
- Register index = r4;
+ Register feedback_vector = r4;
// Do we have a valid feedback vector?
- __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+ __ LoadP(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
+ &gotta_call_runtime);
- // Is optimized code available in the feedback vector?
- Register entry = r6;
- __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
- kPointerSize +
- FeedbackVector::kHeaderSize));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Found code, check if it is marked for deopt, if so call into runtime to
- // clear the optimized code slot.
- __ LoadlW(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
- __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ bne(&gotta_call_runtime);
-
- // Code is good, get it into the closure and tail call.
- ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
- __ JumpToJSEntry(entry);
+ // Is there an optimization marker or optimized code in the feedback vector?
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// We found no optimized code.
- __ bind(&try_shared);
+ Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- // Is the shared function marked for tier up?
- __ LoadlB(r7, FieldMemOperand(
- entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
- __ bne(&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
@@ -1513,15 +1583,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
-
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
-}
-
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1668,30 +1729,70 @@ void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
- SaveFPRegsMode save_doubles) {
+void Builtins::Generate_NotifyBuiltinContinuation(MacroAssembler* masm) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Preserve possible return result from lazy deopt.
+ __ push(r2);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
- __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ __ CallRuntime(Runtime::kNotifyStubFailure, false);
+ __ pop(r2);
+ }
+
+ __ AddP(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Ret(); // Jump to ContinueToBuiltin stub
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Turbofan());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ if (with_result) {
+ // Overwrite the hole inserted by the deoptimizer with the return value from
+ // the LAZY deopt point.
+ __ StoreP(
+ r2, MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
}
+ __ LoadP(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ip);
+ __ AddP(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(r0);
+ __ LoadRR(r14, r0);
+ __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+} // namespace
- __ la(sp, MemOperand(sp, kPointerSize)); // Ignore state
- __ Ret(); // Jump to miss handler
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
}
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
}
-void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
@@ -1811,52 +1912,47 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
- // 1. Load receiver into r3, argArray into r2 (if present), remove all
+ // 1. Load receiver into r3, argArray into r4 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ LoadRR(scratch, r2);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ LoadRR(r4, scratch);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r2 : argArray
+ // -- r4 : argArray
// -- r3 : receiver
// -- sp[0] : thisArg
// -----------------------------------
- // 2. Make sure the receiver is actually callable.
- Label receiver_not_callable;
- __ JumpIfSmi(r3, &receiver_not_callable);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
- __ beq(&receiver_not_callable);
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r4, Heap::kUndefinedValueRootIndex, &no_arguments);
- // 4a. Apply the receiver to the given argArray (passing undefined for
- // new.target).
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
@@ -1865,13 +1961,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadImmP(r2, Operand::Zero());
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
- // 4c. The receiver is not callable, throw an appropriate TypeError.
- __ bind(&receiver_not_callable);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
}
// static
@@ -1927,19 +2016,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // 1. Load target into r3 (if present), argumentsList into r4 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ LoadRR(scratch, r3);
- __ LoadRR(r2, r3);
+ __ LoadRR(r4, r3);
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
@@ -1947,37 +2036,25 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
- // -- r2 : argumentsList
+ // -- r4 : argumentsList
// -- r3 : target
// -- sp[0] : thisArgument
// -----------------------------------
- // 2. Make sure the target is actually callable.
- Label target_not_callable;
- __ JumpIfSmi(r3, &target_not_callable);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsCallable);
- __ beq(&target_not_callable);
-
- // 3a. Apply the target to the given argumentsList (passing undefined for
- // new.target).
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
- // 3b. The target is not callable, throw an appropriate TypeError.
- __ bind(&target_not_callable);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
- }
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(masm->isolate()->builtins()->CallWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
@@ -1989,18 +2066,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// -- sp[12] : receiver
// -----------------------------------
- // 1. Load target into r3 (if present), argumentsList into r2 (if present),
+ // 1. Load target into r3 (if present), argumentsList into r4 (if present),
// new.target into r5 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label skip;
- Register arg_size = r4;
+ Register arg_size = r7;
Register new_sp = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ LoadRR(r2, r3);
+ __ LoadRR(r4, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
__ CmpP(arg_size, Operand(kPointerSize));
@@ -2008,7 +2085,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ LoadRR(r5, r3); // new.target defaults to target
__ beq(&skip);
- __ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
@@ -2017,44 +2094,23 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// ----------- S t a t e -------------
- // -- r2 : argumentsList
+ // -- r4 : argumentsList
// -- r5 : new.target
// -- r3 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
- // 2. Make sure the target is actually a constructor.
- Label target_not_constructor;
- __ JumpIfSmi(r3, &target_not_constructor);
- __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsConstructor);
- __ beq(&target_not_constructor);
-
- // 3. Make sure the target is actually a constructor.
- Label new_target_not_constructor;
- __ JumpIfSmi(r5, &new_target_not_constructor);
- __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::kIsConstructor);
- __ beq(&new_target_not_constructor);
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4a. Construct the target with the given new.target and argumentsList.
- __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
-
- // 4b. The target is not a constructor, throw an appropriate TypeError.
- __ bind(&target_not_constructor);
- {
- __ StoreP(r3, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
- // 4c. The new.target is not a constructor, throw an appropriate TypeError.
- __ bind(&new_target_not_constructor);
- {
- __ StoreP(r5, MemOperand(sp, 0));
- __ TailCallRuntime(Runtime::kThrowNotConstructor);
- }
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->ConstructWithArrayLike(),
+ RelocInfo::CODE_TARGET);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2095,99 +2151,17 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_Apply(MacroAssembler* masm) {
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r2 : argumentsList
- // -- r3 : target
- // -- r5 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
+ // -- r3 : target
+ // -- r2 : number of parameters on the stack (not including the receiver)
+ // -- r4 : arguments list (a FixedArray)
+ // -- r6 : len (number of elements to push from args)
+ // -- r5 : new.target (for [[Construct]])
// -----------------------------------
- // Create the list of arguments from the array-like argumentsList.
- {
- Label create_arguments, create_array, create_holey_array, create_runtime,
- done_create;
- __ JumpIfSmi(r2, &create_runtime);
-
- // Load the map of argumentsList into r4.
- __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- // Load native context into r6.
- __ LoadP(r6, NativeContextMemOperand());
-
- // Check if argumentsList is an (unmodified) arguments object.
- __ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ CmpP(ip, r4);
- __ beq(&create_arguments);
- __ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ CmpP(ip, r4);
- __ beq(&create_arguments);
-
- // Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
- __ beq(&create_array);
-
- // Ask the runtime to create the list (actually a FixedArray).
- __ bind(&create_runtime);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r5, r2);
- __ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ Pop(r3, r5);
- __ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ SmiUntag(r4);
- }
- __ b(&done_create);
-
- // Try to create the list from an arguments object.
- __ bind(&create_arguments);
- __ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
- __ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
- __ CmpP(r4, ip);
- __ bne(&create_runtime);
- __ SmiUntag(r4);
- __ LoadRR(r2, r6);
- __ b(&done_create);
-
- // For holey JSArrays we need to check that the array prototype chain
- // protector is intact and our prototype is the Array.prototype actually.
- __ bind(&create_holey_array);
- __ LoadP(r4, FieldMemOperand(r4, Map::kPrototypeOffset));
- __ LoadP(r6, ContextMemOperand(r6, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ CmpP(r4, r6);
- __ bne(&create_runtime);
- __ LoadRoot(r6, Heap::kArrayProtectorRootIndex);
- __ LoadP(r4, FieldMemOperand(r6, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&create_runtime);
- __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
- __ SmiUntag(r4);
- __ b(&done_create);
-
- // Try to create the list from a JSArray object.
- // -- r4 and r6 must be preserved till bne create_holey_array.
- __ bind(&create_array);
- __ LoadlB(r7, FieldMemOperand(r4, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r7);
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ CmpP(r7, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&create_runtime);
- // Only FAST_XXX after this point, FAST_HOLEY_XXX are odd values.
- __ TestBit(r7, Map::kHasNonInstancePrototype, r0);
- __ bne(&create_holey_array);
- // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
- __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
- __ SmiUntag(r4);
-
- __ bind(&done_create);
- }
-
+ __ AssertFixedArray(r4);
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2198,54 +2172,41 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ SubP(ip, sp, ip);
// Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
+ __ ShiftLeftP(r0, r6, Operand(kPointerSizeLog2));
__ CmpP(ip, r0); // Signed comparison.
__ bgt(&done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
- // ----------- S t a t e -------------
- // -- r3 : target
- // -- r2 : args (a FixedArray built from argumentsList)
- // -- r4 : len (number of elements to push from args)
- // -- r5 : new.target (checked to be constructor or undefined)
- // -- sp[0] : thisArgument
- // -----------------------------------
-
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
Label loop, no_args, skip;
- __ CmpP(r4, Operand::Zero());
+ __ CmpP(r6, Operand::Zero());
__ beq(&no_args);
- __ AddP(r2, r2,
+ __ AddP(r4, r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ LoadRR(r1, r4);
+ __ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r2, kPointerSize));
- __ la(r2, MemOperand(r2, kPointerSize));
+ __ LoadP(ip, MemOperand(r4, kPointerSize));
+ __ la(r4, MemOperand(r4, kPointerSize));
__ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
__ bne(&skip, Label::kNear);
- __ LoadRR(ip, r8);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ bind(&skip);
__ push(ip);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
- __ LoadRR(r2, r4);
+ __ AddP(r2, r2, r6);
}
- // Dispatch to Call or Construct depending on whether new.target is undefined.
- {
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- }
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
}
// static
-void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r5 : the new.target (for [[Construct]] calls)
@@ -2271,16 +2232,11 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
{
// Load the length from the ArgumentsAdaptorFrame.
__ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
-#if V8_TARGET_ARCH_S390X
__ SmiUntag(r7);
-#endif
}
__ bind(&arguments_done);
Label stack_done, stack_overflow;
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r7);
-#endif
__ SubP(r7, r7, r4);
__ CmpP(r7, Operand::Zero());
__ ble(&stack_done);
@@ -2313,106 +2269,9 @@ void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-namespace {
-
-// Drops top JavaScript frame and an arguments adaptor frame below it (if
-// present) preserving all the arguments prepared for current call.
-// Does nothing if debugger is currently active.
-// ES6 14.6.3. PrepareForTailCall
-//
-// Stack structure for the function g() tail calling f():
-//
-// ------- Caller frame: -------
-// | ...
-// | g()'s arg M
-// | ...
-// | g()'s arg 1
-// | g()'s receiver arg
-// | g()'s caller pc
-// ------- g()'s frame: -------
-// | g()'s caller fp <- fp
-// | g()'s context
-// | function pointer: g
-// | -------------------------
-// | ...
-// | ...
-// | f()'s arg N
-// | ...
-// | f()'s arg 1
-// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
-// ----------------------
-//
-void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
- Register scratch1, Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Comment cmnt(masm, "[ PrepareForTailCall");
-
- // Prepare for tail call only if ES2015 tail call elimination is active.
- Label done;
- ExternalReference is_tail_call_elimination_enabled =
- ExternalReference::is_tail_call_elimination_enabled_address(
- masm->isolate());
- __ mov(scratch1, Operand(is_tail_call_elimination_enabled));
- __ LoadlB(scratch1, MemOperand(scratch1));
- __ CmpP(scratch1, Operand::Zero());
- __ beq(&done);
-
- // Drop possible interpreter handler/stub frame.
- {
- Label no_interpreter_frame;
- __ LoadP(scratch3,
- MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ bne(&no_interpreter_frame);
- __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ bind(&no_interpreter_frame);
- }
-
- // Check if next frame is an arguments adaptor frame.
- Register caller_args_count_reg = scratch1;
- Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(
- scratch3,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(scratch3,
- Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
- __ bne(&no_arguments_adaptor);
-
- // Drop current frame and load arguments count from arguments adaptor frame.
- __ LoadRR(fp, scratch2);
- __ LoadP(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
- __ b(&formal_parameter_count_loaded);
-
- __ bind(&no_arguments_adaptor);
- // Load caller's formal parameter count
- __ LoadP(scratch1,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ LoadP(scratch1,
- FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(caller_args_count_reg,
- FieldMemOperand(scratch1,
- SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(caller_args_count_reg);
-#endif
-
- __ bind(&formal_parameter_count_loaded);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3);
- __ bind(&done);
-}
-} // namespace
-
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSFunction)
@@ -2424,9 +2283,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift,
- r0);
+ __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorMask, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2435,8 +2292,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
- (1 << SharedFunctionInfo::kNativeBit)));
+ __ AndP(r0, r5,
+ Operand(SharedFunctionInfo::IsStrictBit::kMask |
+ SharedFunctionInfo::IsNativeBit::kMask));
__ bne(&done_convert);
{
// ----------- S t a t e -------------
@@ -2501,15 +2359,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
__ LoadW(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r4);
-#endif
ParameterCount actual(r2);
ParameterCount expected(r4);
__ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
@@ -2609,18 +2460,13 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
// static
-void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r3);
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
@@ -2640,8 +2486,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the target to call (can be any Object).
@@ -2651,10 +2496,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ JumpIfSmi(r3, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
@@ -2662,22 +2507,14 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ TestBit(r6, Map::kIsCallable);
__ beq(&non_callable);
+ // Check if target is a proxy and call CallProxy external builtin
__ CmpP(r7, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 0. Prepare for tail call if necessary.
- if (tail_call_mode == TailCallMode::kAllow) {
- PrepareForTailCall(masm, r2, r5, r6, r7);
- }
-
- // 1. Runtime fallback for Proxy [[Call]].
- __ Push(r3);
- // Increase the arguments size to include the pushed function and the
- // existing receiver on the stack.
- __ AddP(r2, r2, Operand(2));
- // Tail-call to the runtime.
- __ JumpToExternalReference(
- ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
+ __ mov(r7, Operand(ExternalReference(Builtins::kCallProxy, masm->isolate())));
+ __ LoadP(r7, MemOperand(r7));
+ __ AddP(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r7);
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -2688,7 +2525,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
+ ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2700,156 +2537,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
- Register argc = r2;
- Register constructor = r3;
- Register new_target = r5;
-
- Register scratch = r4;
- Register scratch2 = r8;
-
- Register spread = r6;
- Register spread_map = r7;
- Register spread_len = r7;
- Label runtime_call, push_args;
- __ LoadP(spread, MemOperand(sp, 0));
- __ JumpIfSmi(spread, &runtime_call);
- __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
- // Check that the spread is an array.
- __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
- __ bne(&runtime_call);
-
- // Check that we have the original ArrayPrototype.
- __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch2,
- ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- __ CmpP(scratch, scratch2);
- __ bne(&runtime_call);
-
- // Check that the ArrayPrototype hasn't been modified in a way that would
- // affect iteration.
- __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- // Check that the map of the initial array iterator hasn't changed.
- __ LoadP(scratch2, NativeContextMemOperand());
- __ LoadP(scratch,
- ContextMemOperand(scratch2,
- Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch2,
- ContextMemOperand(
- scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- __ CmpP(scratch, scratch2);
- __ bne(&runtime_call);
-
- // For FastPacked kinds, iteration will have the same effect as simply
- // accessing each property in order.
- Label no_protector_check;
- __ LoadlB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(scratch);
- __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
- __ bgt(&runtime_call);
- // For non-FastHoley kinds, we can skip the protector check.
- __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
- __ beq(&no_protector_check);
- __ CmpP(scratch, Operand(FAST_ELEMENTS));
- __ beq(&no_protector_check);
- // Check the ArrayProtector cell.
- __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
- __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
- __ bne(&runtime_call);
-
- __ bind(&no_protector_check);
- // Load the FixedArray backing store, but use the length from the array.
- __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
- __ SmiUntag(spread_len);
- __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
- __ b(&push_args);
-
- __ bind(&runtime_call);
- {
- // Call the builtin for the result of the spread.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
- __ Push(constructor, new_target, argc, spread);
- __ CallRuntime(Runtime::kSpreadIterableFixed);
- __ LoadRR(spread, r2);
- __ Pop(constructor, new_target, argc);
- __ SmiUntag(argc);
- }
-
- {
- // Calculate the new nargs including the result of the spread.
- __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
- __ SmiUntag(spread_len);
-
- __ bind(&push_args);
- // argc += spread_len - 1. Subtract 1 for the spread itself.
- __ AddP(argc, argc, spread_len);
- __ SubP(argc, argc, Operand(1));
-
- // Pop the spread argument off the stack.
- __ Pop(scratch);
- }
-
- // Check for stack overflow.
- {
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack limit".
- Label done;
- __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
- // Make scratch the space we have left. The stack might already be
- // overflowed here which will cause scratch to become negative.
- __ SubP(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
- __ CmpP(scratch, r0);
- __ bgt(&done); // Signed comparison.
- __ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&done);
- }
-
- // Put the evaluated spread onto the stack as additional arguments.
- {
- __ LoadImmP(scratch, Operand::Zero());
- Label done, push, loop;
- __ bind(&loop);
- __ CmpP(scratch, spread_len);
- __ beq(&done);
- __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
- __ AddP(scratch2, spread, r0);
- __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
- __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
- __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
- __ bind(&push);
- __ Push(scratch2);
- __ AddP(scratch, scratch, Operand(1));
- __ b(&loop);
- __ bind(&done);
- }
-}
-
-// static
-void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
- // -- r3 : the constructor to call (can be any Object)
- // -----------------------------------
-
- // CheckSpreadAndPushToStack will push r5 to save it.
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- TailCallMode::kDisallow),
- RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2970,18 +2657,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
- // -- r3 : the constructor to call (can be any Object)
- // -- r5 : the new target (either the same as the constructor or
- // the JSFunction on which new was invoked initially)
- // -----------------------------------
-
- CheckSpreadAndPushToStack(masm);
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-}
-
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------