Diffstat (limited to 'llvm/lib/Target/AArch64')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp            |  2 +-
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp        | 15 ++++++++-------
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp  | 25 ++++++++++++-------------
3 files changed, 21 insertions(+), 21 deletions(-)
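
The change is mechanical across all three files: CCState's accessor was renamed from getNextStackOffset() to getStackSize(), since the value it returns is the total byte size of the outgoing stack argument area, not the offset of some "next" slot. A minimal sketch of the call-site pattern, assuming the post-rename CCState API from llvm/CodeGen/CallingConvLower.h (the helper name is illustrative):

#include "llvm/CodeGen/CallingConvLower.h"

// Sketch only: once AnalyzeCallOperands() has assigned every outgoing
// argument, CCState knows how many bytes must go on the stack.
static uint64_t outgoingStackBytes(llvm::CCState &CCInfo) {
  return CCInfo.getStackSize(); // formerly CCInfo.getNextStackOffset()
}
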
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index f805adc19815..1ae3709e9588 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3022,7 +3022,7 @@ bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
// Get a count of how many bytes are to be pushed on the stack.
- NumBytes = CCInfo.getNextStackOffset();
+ NumBytes = CCInfo.getStackSize();
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
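
For context, the byte count FastISel takes from getStackSize() feeds directly into the call-frame-setup pseudo (CALLSEQ_START). A hedged sketch of that surrounding pattern; the helper name and parameter list are illustrative, not the in-tree code:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

// Illustrative helper: emit CALLSEQ_START. Frame lowering later rewrites
// it into the SP adjustment that reserves NumBytes of outgoing argument
// space.
static void emitCallFrameSetup(llvm::MachineBasicBlock &MBB,
                               llvm::MachineBasicBlock::iterator InsertPt,
                               const llvm::TargetInstrInfo &TII,
                               const llvm::DebugLoc &DL, unsigned NumBytes) {
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  llvm::BuildMI(MBB, InsertPt, DL, TII.get(AdjStackDown))
      .addImm(NumBytes) // bytes of stack arguments, from getStackSize()
      .addImm(0);
}
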
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 04b27b8019f6..2a9d4d37b42c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6560,11 +6560,12 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
}
// This will point to the next argument passed via stack.
- unsigned StackOffset = CCInfo.getNextStackOffset();
+ unsigned VarArgsOffset = CCInfo.getStackSize();
// We currently pass all varargs at 8-byte alignment, or 4 for ILP32
- StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
- FuncInfo->setVarArgsStackOffset(StackOffset);
- FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
+ VarArgsOffset = alignTo(VarArgsOffset, Subtarget->isTargetILP32() ? 4 : 8);
+ FuncInfo->setVarArgsStackOffset(VarArgsOffset);
+ FuncInfo->setVarArgsStackIndex(
+ MFI.CreateFixedObject(4, VarArgsOffset, true));
if (MFI.hasMustTailInVarArgFunc()) {
SmallVector<MVT, 2> RegParmTypes;
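
The renamed value is also what fixes where on-stack varargs begin: the first anonymous argument lives just past the named stack arguments, rounded up to the vararg slot size. A small sketch of that computation using llvm::alignTo; the function name is illustrative:

#include "llvm/Support/MathExtras.h"

// The first on-stack vararg starts at the end of the named stack
// arguments, aligned to the vararg slot size: 8 bytes for LP64 targets,
// 4 bytes for ILP32.
static uint64_t varArgsStartOffset(uint64_t StackSize, bool IsILP32) {
  return llvm::alignTo(StackSize, IsILP32 ? 4 : 8);
}
// e.g. varArgsStartOffset(20, /*IsILP32=*/false) == 24
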
@@ -6604,7 +6605,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
}
}
- unsigned StackArgSize = CCInfo.getNextStackOffset();
+ unsigned StackArgSize = CCInfo.getStackSize();
bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
// This is a non-standard ABI so by fiat I say we're allowed to make full
@@ -7001,7 +7002,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
// If the stack arguments for this call do not fit into our own save area then
// the call cannot be made tail.
- if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
+ if (CCInfo.getStackSize() > FuncInfo->getBytesInStackArgArea())
return false;
const MachineRegisterInfo &MRI = MF.getRegInfo();
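
Here the new name reads much better: the question is whether the callee's stack arguments fit into the stack-argument area this function already owns, since a tail call reuses that space. A sketch of the predicate, with names chosen for illustration:

#include "llvm/CodeGen/CallingConvLower.h"

// A tail call reuses the caller's incoming stack-argument area, so it is
// only legal when the outgoing stack arguments fit inside that area.
static bool stackArgsFitForTailCall(const llvm::CCState &CCInfo,
                                    uint64_t BytesInStackArgArea) {
  return CCInfo.getStackSize() <= BytesInStackArgArea;
}
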
@@ -7165,7 +7166,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
"site marked musttail");
// Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
+ unsigned NumBytes = CCInfo.getStackSize();
if (IsSibCall) {
// Since we're not changing the ABI to make this a tail call, the memory
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index d09ecdd29e26..f03f80b26899 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -124,7 +124,7 @@ struct AArch64OutgoingValueAssigner
} else
Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
- StackOffset = State.getNextStackOffset();
+ StackSize = State.getStackSize();
return Res;
}
};
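
The GlobalISel assigner keeps its own copy of the running total, so its member gets the same rename. A hedged, simplified sketch of the pattern above (the real AArch64OutgoingValueAssigner derives from CallLowering::OutgoingValueAssigner and chooses between fixed and vararg assign functions):

#include "llvm/CodeGen/CallingConvLower.h"

// Simplified: run the calling-convention function for one argument, then
// snapshot CCState's running stack size so it is still available after
// argument assignment finishes.
struct OutgoingAssignerSketch {
  uint64_t StackSize = 0; // was "StackOffset"; the value is a total size

  bool assignArg(unsigned ValNo, llvm::MVT ValVT, llvm::MVT LocVT,
                 llvm::CCValAssign::LocInfo LocInfo,
                 llvm::ISD::ArgFlagsTy Flags, llvm::CCState &State,
                 llvm::CCAssignFn *AssignFn) {
    bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    StackSize = State.getStackSize();
    return Res;
  }
};
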
@@ -706,7 +706,7 @@ bool AArch64CallLowering::lowerFormalArguments(
}
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- uint64_t StackOffset = Assigner.StackOffset;
+ uint64_t StackSize = Assigner.StackSize;
if (F.isVarArg()) {
if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
// The AAPCS variadic function ABI is identical to the non-variadic
@@ -720,22 +720,21 @@ bool AArch64CallLowering::lowerFormalArguments(
}
// We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
- StackOffset =
- alignTo(Assigner.StackOffset, Subtarget.isTargetILP32() ? 4 : 8);
+ StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);
auto &MFI = MIRBuilder.getMF().getFrameInfo();
- FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
+ FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
}
if (doesCalleeRestoreStack(F.getCallingConv(),
MF.getTarget().Options.GuaranteedTailCallOpt)) {
// We have a non-standard ABI, so why not make full use of the stack that
// we're going to pop? It must be aligned to 16 B in any case.
- StackOffset = alignTo(StackOffset, 16);
+ StackSize = alignTo(StackSize, 16);
// If we're expected to restore the stack (e.g. fastcc), then we'll be
// adding a multiple of 16.
- FuncInfo->setArgumentStackToRestore(StackOffset);
+ FuncInfo->setArgumentStackToRestore(StackSize);
// Our own callers will guarantee that the space is free by giving an
// aligned value to CALLSEQ_START.
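
For callee-pop conventions (e.g. fastcc under GuaranteedTailCallOpt), the popped amount must keep SP 16-byte aligned, so the size is rounded up before being recorded. A one-line sketch:

#include "llvm/Support/MathExtras.h"

// AAPCS64 requires SP to stay 16-byte aligned, so a callee that pops its
// own argument area must pop a multiple of 16 bytes.
static uint64_t calleePoppedBytes(uint64_t StackSize) {
  return llvm::alignTo(StackSize, 16);
}
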
@@ -745,7 +744,7 @@ bool AArch64CallLowering::lowerFormalArguments(
// will fit on the caller's stack. So, whenever we lower formal arguments,
// we should keep track of this information, since we might lower a tail call
// in this function later.
- FuncInfo->setBytesInStackArgArea(StackOffset);
+ FuncInfo->setBytesInStackArgArea(StackSize);
if (Subtarget.hasCustomCallingConv())
Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
@@ -861,7 +860,7 @@ bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
// Make sure that they can fit on the caller's stack.
const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
+ if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
return false;
}
@@ -1110,7 +1109,7 @@ bool AArch64CallLowering::lowerTailCall(
// The callee will pop the argument stack as a tail call. Thus, we must
// keep it 16-byte aligned.
- NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);
+ NumBytes = alignTo(OutInfo.getStackSize(), 16);
// FPDiff will be negative if this tail call requires more space than we
// would automatically have in our incoming argument space. Positive if we
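
The FPDiff described in this comment compares the tail call's 16-byte-aligned stack-argument requirement against the area the caller already owns. A hedged sketch of that arithmetic, with illustrative names:

// Positive: the existing incoming-argument area already covers the tail
// call's stack arguments. Negative: the tail call needs extra space.
static int64_t framePointerDiff(uint64_t BytesInStackArgArea,
                                uint64_t NumBytesForTailCall) {
  return (int64_t)BytesInStackArgArea - (int64_t)NumBytesForTailCall;
}
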
@@ -1315,12 +1314,12 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
uint64_t CalleePopBytes =
doesCalleeRestoreStack(Info.CallConv,
MF.getTarget().Options.GuaranteedTailCallOpt)
- ? alignTo(Assigner.StackOffset, 16)
+ ? alignTo(Assigner.StackSize, 16)
: 0;
- CallSeqStart.addImm(Assigner.StackOffset).addImm(0);
+ CallSeqStart.addImm(Assigner.StackSize).addImm(0);
MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
- .addImm(Assigner.StackOffset)
+ .addImm(Assigner.StackSize)
.addImm(CalleePopBytes);
// If Callee is a reg, since it is used by a target specific
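
Finally, the same renamed total feeds both ends of the GlobalISel call sequence, as the last hunk shows. A hedged sketch of that bookkeeping; buildCallSequenceEnd is an illustrative name, and the opcode is passed in rather than hard-coding AArch64::ADJCALLSTACKUP:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Support/MathExtras.h"

// Illustrative: close the call sequence. CalleePopBytes is nonzero only
// for callee-pop conventions, where it must be a multiple of 16.
static void buildCallSequenceEnd(llvm::MachineIRBuilder &MIRBuilder,
                                 unsigned AdjStackUpOpc,
                                 uint64_t StackSize, bool CalleeRestores) {
  uint64_t CalleePopBytes =
      CalleeRestores ? llvm::alignTo(StackSize, 16) : 0;
  MIRBuilder.buildInstr(AdjStackUpOpc)
      .addImm(StackSize)
      .addImm(CalleePopBytes);
}
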