Diffstat (limited to 'deps/v8/src/builtins/ppc/builtins-ppc.cc')
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc  321
1 file changed, 172 insertions(+), 149 deletions(-)
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e5868c50d8..ed3fcfd55d 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -52,7 +52,10 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
USE(GetSharedFunctionInfoBytecodeOrBaseline);
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ __ LoadMap(scratch1, sfi_data);
+
+#ifndef V8_JITLESS
+ __ CompareInstanceType(scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
@@ -63,8 +66,12 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
__ beq(is_baseline);
}
__ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
+#else
+ __ CompareInstanceType(scratch1, scratch1, INTERPRETER_DATA_TYPE);
+#endif // !V8_JITLESS
+
__ bne(&done);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);
@@ -118,12 +125,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset),
r0);
- // Get the Code object from the shared function info.
+ // Get the InstructionStream object from the shared function info.
Register code_obj = r9;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
r0);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
code_obj,
FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);
@@ -131,7 +138,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+ __ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -144,7 +151,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
- __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
+ __ CompareObjectType(code_obj, r6, r6, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
@@ -154,12 +161,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Load the feedback vector.
Register feedback_vector = r5;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+ r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
@@ -225,15 +231,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Pop(code_obj);
+ __ LoadCodeEntry(code_obj, code_obj);
__ AddS64(code_obj, code_obj, kReturnRegister0);
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
Register scratch = ip;
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, scratch);
- Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ Generate_OSREntry(masm, code_obj, 0);
} else {
- __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(code_obj);
}
__ Trap(); // Unreachable.
@@ -358,8 +364,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
- __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&stack_overflow);
@@ -380,8 +386,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
- // precondition here is: if maybe_target_code is a Code object, it must NOT
- // be marked_for_deoptimization (callers must ensure this).
+ // precondition here is: if maybe_target_code is an InstructionStream object,
+ // it must NOT be marked_for_deoptimization (callers must ensure this).
__ CmpSmiLiteral(maybe_target_code, Smi::zero(), r0);
__ bne(&jump_to_optimized_code);
}
@@ -426,18 +432,19 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
r0);
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (V8_EMBEDDED_CONSTANT_POOL_BOOL) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
}
+ __ LoadCodeEntry(r3, r3);
+
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ SmiUntag(r4,
@@ -486,7 +493,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
@@ -602,8 +609,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(r4, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(r4, MacroAssembler::kCountIsSmi,
+ MacroAssembler::kCountIncludesReceiver);
__ blr();
__ bind(&check_receiver);
@@ -651,10 +658,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r4);
// Load suspended function and context.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
- __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
- r0);
+ __ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -694,12 +700,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
__ subi(r6, r6, Operand(kJSArgcReceiverSlots));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
r0);
{
@@ -710,14 +716,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&done_loop);
__ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
__ Push(scratch);
__ b(&loop);
__ bind(&done_loop);
// Push receiver.
- __ LoadAnyTaggedField(
+ __ LoadTaggedField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
__ Push(scratch);
}
@@ -725,9 +731,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (v8_flags.debug_code) {
Label is_baseline;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
@@ -737,7 +743,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(r3, FieldMemOperand(
r3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -747,8 +753,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r6, r4);
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
- r0);
+ __ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
__ JumpCodeObject(r5);
}
@@ -760,7 +765,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
@@ -771,7 +776,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r4);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
}
__ b(&stepping_prepared);
@@ -836,6 +841,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Initialize the root register.
// C calling convention. The first argument is passed in r3.
__ mr(kRootRegister, r3);
+
+#ifdef V8_COMPRESS_POINTERS
+ // Initialize the pointer cage base register.
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
+#endif
}
// Push a frame with special values setup to mark it as an entry frame.
@@ -861,14 +872,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Clear c_entry_fp, now we've pushed its previous value to the stack.
// If the c_entry_fp is not already zero and we don't clear it, the
- // SafeStackFrameIterator will assume we are executing C++ and miss the JS
- // frames on top.
+ // StackFrameIteratorForProfiler will assume we are executing C++ and miss the
+ // JS frames on top.
__ li(r0, Operand::Zero());
__ StoreU64(r0, MemOperand(r3));
Register scratch = r9;
// Set up frame pointer for the frame to be pushed.
- __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+ __ addi(fp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
@@ -956,7 +967,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ StoreU64(r6, MemOperand(scratch));
// Reset the stack to the callee saved registers.
- __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+ __ addi(sp, sp, Operand(-EntryFrameConstants::kNextExitFrameFPOffset));
// Restore callee-saved double registers.
__ MultiPopDoubles(kCalleeSavedDoubles);
@@ -1104,8 +1115,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
- __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+ MacroAssembler::kCountIncludesReceiver);
}
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1197,12 +1208,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = ip;
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+ r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
__ AssertFeedbackVector(feedback_vector, r11);
// Check for a tiering state.
@@ -1359,14 +1369,13 @@ void Builtins::Generate_BaselineOutOfLinePrologueDeopt(MacroAssembler* masm) {
void Builtins::Generate_InterpreterEntryTrampoline(
MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
Register closure = r4;
- Register feedback_vector = r5;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
// Load original bytecode array or the debug copy.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);
@@ -1381,18 +1390,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);
+#ifndef V8_JITLESS
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
- feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ Register feedback_vector = r5;
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+ r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1428,6 +1438,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// the frame (that is done below).
__ bind(&push_stack_frame);
+#else
+ // Note: By omitting the above code in jitless mode we also disable:
+ // - kFlagsLogNextExecution: only used for logging/profiling; and
+ // - kInvocationCountOffset: only used for tiering heuristics and code
+ // coverage.
+#endif // !V8_JITLESS
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1568,23 +1584,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ jmp(&after_stack_check_interrupt);
+#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
- __ LoadTaggedPointerField(
- feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
- r0);
+ __ LoadTaggedField(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset),
+ r0);
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
__ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);
@@ -1603,6 +1620,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
+#endif // !V8_JITLESS
__ bind(&compile_lazy);
__ GenerateTailCallToReturnedCode(Runtime::kCompileLazy);
@@ -1621,7 +1639,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ sub(start_address, start_address, scratch);
// Push the arguments.
__ PushArray(start_address, num_args, scratch, r0,
- TurboAssembler::PushArrayOrder::kReverse);
+ MacroAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1758,19 +1776,19 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
__ CompareObjectType(r5, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
r0);
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadCodeEntry(r5, r5);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@@ -2012,8 +2030,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2096,8 +2114,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2145,8 +2163,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountIncludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger,
+ MacroAssembler::kCountIncludesReceiver);
}
// ----------- S t a t e -------------
@@ -2207,7 +2225,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
-// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
+// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
+// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2224,8 +2243,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(r5, HeapObject::kMapOffset), r0);
+ __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
+ r0);
__ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
__ beq(&ok);
@@ -2260,7 +2279,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r7);
__ bind(&loop);
- __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
+ __ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
__ addi(r5, r5, Operand(kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
@@ -2295,8 +2314,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r6, &new_target_not_constructor);
- __ LoadTaggedPointerField(scratch,
- FieldMemOperand(r6, HeapObject::kMapOffset), r0);
+ __ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
+ r0);
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
@@ -2379,14 +2398,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertCallableFunction(r4);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
- r0);
+ __ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
@@ -2440,7 +2458,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r3, r4);
__ SmiUntag(r3);
}
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ bind(&convert_receiver);
}
@@ -2471,7 +2489,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r5 and length of that into r7.
Label no_bound_arguments;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
__ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
__ beq(&no_bound_arguments, cr0);
@@ -2520,7 +2538,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ subi(r7, r7, Operand(1));
__ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
__ add(scratch, scratch, r5);
- __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
+ __ LoadTaggedField(scratch, MemOperand(scratch), r0);
__ Push(scratch);
__ bdnz(&loop);
__ bind(&done);
@@ -2543,15 +2561,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
- __ LoadAnyTaggedField(
- r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
+ __ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
+ r0);
__ StoreReceiver(r6, r3, ip);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
@@ -2651,7 +2669,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2683,12 +2701,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label skip;
__ CompareTagged(r4, r6);
__ bne(&skip);
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
@@ -2712,8 +2730,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(
- map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
+ __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
{
Register flags = r5;
DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2801,15 +2818,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
Register scratch = ip;
Label allocate_vector, done;
- __ LoadTaggedPointerField(
+ __ LoadTaggedField(
vector,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kFeedbackVectorsOffset),
scratch);
__ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
__ AddS64(vector, vector, scratch);
- __ LoadTaggedPointerField(
- vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
+ __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
+ scratch);
__ JumpIfSmi(vector, &allocate_vector);
__ bind(&done);
__ push(kWasmInstanceRegister);
@@ -2949,8 +2966,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
- SaveFPRegsMode save_doubles, ArgvMode argv_mode,
- bool builtin_exit_frame) {
+ ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r3: number of arguments including receiver
// r4: pointer to builtin function
@@ -2986,9 +3002,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
arg_stack_space += result_size;
}
- __ EnterExitFrame(
- save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
- builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+ __ EnterExitFrame(arg_stack_space, builtin_exit_frame
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mr(r14, r3);
@@ -3053,7 +3069,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// r14: still holds argc (callee-saved).
: r14;
- __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
+ __ LeaveExitFrame(argc, false);
__ blr();
// Handling of exception.
@@ -3328,7 +3344,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
} else {
__ mov(r14, Operand(stack_space));
}
- __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
+ __ LeaveExitFrame(r14, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r14, RootIndex::kTheHoleValue);
@@ -3362,6 +3378,18 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ b(&leave_exit_frame);
}
+MemOperand ExitFrameStackSlotOperand(int offset) {
+ static constexpr int kFrameOffset = 1 * kSystemPointerSize;
+ return MemOperand(sp, (kStackFrameExtraParamSlot * kSystemPointerSize) +
+ offset + kFrameOffset);
+}
+
+MemOperand ExitFrameCallerStackSlotOperand(int index) {
+ return MemOperand(
+ fp, (BuiltinExitFrameConstants::kFixedSlotCountAboveFp + index) *
+ kSystemPointerSize);
+}
+
} // namespace
void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
@@ -3384,12 +3412,13 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
Register scratch = r7;
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
+ using FCI = FunctionCallbackInfo<v8::Value>;
using FCA = FunctionCallbackArguments;
static_assert(FCA::kArgsLength == 6);
static_assert(FCA::kNewTargetIndex == 5);
static_assert(FCA::kDataIndex == 4);
- static_assert(FCA::kReturnValueOffset == 3);
+ static_assert(FCA::kReturnValueIndex == 3);
static_assert(FCA::kReturnValueDefaultValueIndex == 2);
static_assert(FCA::kIsolateIndex == 1);
static_assert(FCA::kHolderIndex == 0);
@@ -3397,12 +3426,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Set up FunctionCallbackInfo's implicit_args on the stack as follows:
//
// Target state:
- // sp[0 * kSystemPointerSize]: kHolder
+ // sp[0 * kSystemPointerSize]: kHolder <= FCI::implicit_args_
// sp[1 * kSystemPointerSize]: kIsolate
// sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
// sp[3 * kSystemPointerSize]: undefined (kReturnValue)
// sp[4 * kSystemPointerSize]: kData
// sp[5 * kSystemPointerSize]: undefined (kNewTarget)
+ // Existing state:
+ // sp[6 * kSystemPointerSize]: <= FCI::values_
// Reserve space on the stack.
__ subi(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
@@ -3437,52 +3468,46 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// [0] space for DirectCEntryStub's LR save
// [1-3] FunctionCallbackInfo
// [4] number of bytes to drop from the stack after returning
+ static constexpr int kSlotsToDropSize = 1 * kSystemPointerSize;
static constexpr int kApiStackSpace = 5;
- static constexpr bool kDontSaveDoubles = false;
+ static_assert(FCI::kImplicitArgsOffset == 0);
+ static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize);
+ static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize);
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+ __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
- kSystemPointerSize));
+ __ StoreU64(scratch, ExitFrameStackSlotOperand(FCI::kImplicitArgsOffset));
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
__ addi(scratch, scratch,
- Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
- kSystemPointerSize));
+ Operand(FCA::kArgsLengthWithReceiver * kSystemPointerSize));
+ __ StoreU64(scratch, ExitFrameStackSlotOperand(FCI::kValuesOffset));
// FunctionCallbackInfo::length_.
- __ stw(argc,
- MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kSystemPointerSize));
+ __ stw(argc, ExitFrameStackSlotOperand(FCI::kLengthOffset));
// We also store the number of bytes to drop from the stack after returning
// from the API function here.
+ MemOperand stack_space_operand =
+ ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropSize);
__ mov(scratch,
Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
__ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
__ add(scratch, scratch, ip);
- __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
- kSystemPointerSize));
+ __ StoreU64(scratch, stack_space_operand);
// v8::InvocationCallback's argument.
__ addi(r3, sp,
Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
-
- // There are two stack slots above the arguments we constructed on the stack.
- // TODO(jgruber): Document what these arguments are.
- static constexpr int kStackSlotsAboveFCA = 2;
- MemOperand return_value_operand(
- fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
-
+ MemOperand return_value_operand =
+ ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex);
static constexpr int kUseStackSpaceOperand = 0;
- MemOperand stack_space_operand(
- sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
AllowExternalCallThatCantCauseGC scope(masm);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -3497,14 +3522,15 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
int apiStackSpace = 0;
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
- static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- static_assert(PropertyCallbackArguments::kHolderIndex == 1);
- static_assert(PropertyCallbackArguments::kIsolateIndex == 2);
- static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- static_assert(PropertyCallbackArguments::kReturnValueOffset == 4);
- static_assert(PropertyCallbackArguments::kDataIndex == 5);
- static_assert(PropertyCallbackArguments::kThisIndex == 6);
- static_assert(PropertyCallbackArguments::kArgsLength == 7);
+ using PCA = PropertyCallbackArguments;
+ static_assert(PCA::kShouldThrowOnErrorIndex == 0);
+ static_assert(PCA::kHolderIndex == 1);
+ static_assert(PCA::kIsolateIndex == 2);
+ static_assert(PCA::kReturnValueDefaultValueIndex == 3);
+ static_assert(PCA::kReturnValueIndex == 4);
+ static_assert(PCA::kDataIndex == 5);
+ static_assert(PCA::kThisIndex == 6);
+ static_assert(PCA::kArgsLength == 7);
Register receiver = ApiGetterDescriptor::ReceiverRegister();
Register holder = ApiGetterDescriptor::HolderRegister();
@@ -3516,20 +3542,21 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
- __ LoadAnyTaggedField(
- scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
+ __ LoadTaggedField(scratch,
+ FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ constexpr int kNameHandleStackSize = 1;
+ constexpr int kStackUnwindSpace = PCA::kArgsLength + kNameHandleStackSize;
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ mr(r3, sp); // r3 = Handle<Name>
@@ -3557,7 +3584,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, apiStackSpace);
+ __ EnterExitFrame(apiStackSpace, StackFrame::EXIT);
if (!ABI_PASSES_HANDLES_IN_REGS) {
// pass 1st arg by reference
@@ -3573,16 +3600,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
-
__ LoadU64(
api_function_address,
FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset),
r0);
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp,
- (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
+ MemOperand return_value_operand = ExitFrameCallerStackSlotOperand(
+ PCA::kReturnValueIndex + kNameHandleStackSize);
MemOperand* const kUseStackSpaceConstant = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kUseStackSpaceConstant,