path: root/llvm/include/llvm/CodeGen/CallingConvLower.h
author    Sergei Barannikov <barannikov88@gmail.com>  2023-05-01 11:56:39 +0300
committer Sergei Barannikov <barannikov88@gmail.com>  2023-05-17 21:51:52 +0300
commit    da42b2846c82063bd1bce78d6a046f78f218eb72 (patch)
tree      7ccf5d5978d8987ad3142c830c48eeb1118d2b67 /llvm/include/llvm/CodeGen/CallingConvLower.h
parent    01a796744745d8413d0821c734caf2fbe19f2eca (diff)
download  llvm-da42b2846c82063bd1bce78d6a046f78f218eb72.tar.gz
[CodeGen] Support allocating arguments by decreasing offsets
Previously, `CCState::AllocateStack` always allocated stack space by increasing offsets. For targets whose stack grows up (away from zero) it is more convenient to allocate arguments by decreasing offsets, so that the first argument is at the top of the stack. This matters when calling a function with a variable number of arguments: the callee does not know the size of the stack, but must still be able to access the "fixed" arguments. For that to work, the "fixed" arguments need fixed offsets relative to the stack top, i.e. the variadic arguments area must be at the stack bottom (at the lowest addresses).

The in-tree target with a stack growing up is AMDGPU, but it allocates arguments by increasing addresses and does not support variadic arguments.

A drive-by change is to promote the stack size/offset to a 64-bit integer, which is what MachineFrameInfo expects.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D149575
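To make the new offset arithmetic concrete, here is a minimal standalone sketch of the two allocation modes; StackAllocator, its members, and the example argument sizes are hypothetical stand-ins mirroring the patched AllocateStack logic, not the actual CCState API:

// sketch.cpp - positive- vs. negative-offset stack argument allocation.
#include <cstdint>
#include <iostream>

// Round Value up to the nearest multiple of Alignment (a power of two),
// matching what llvm::alignTo does for these inputs.
static uint64_t alignTo(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

struct StackAllocator {
  uint64_t StackSize = 0;
  bool NegativeOffsets = false; // true: allocate at decreasing offsets

  int64_t AllocateStack(unsigned Size, uint64_t Alignment) {
    int64_t Offset;
    if (NegativeOffsets) {
      // Grow the stack first, then return the (negative) offset of the
      // new object relative to the stack top.
      StackSize = alignTo(StackSize + Size, Alignment);
      Offset = -int64_t(StackSize);
    } else {
      // Classic behavior: align the current size, then allocate upwards.
      Offset = int64_t(alignTo(StackSize, Alignment));
      StackSize = Offset + Size;
    }
    return Offset;
  }
};

int main() {
  StackAllocator Up, Down;
  Down.NegativeOffsets = true;
  struct Arg { unsigned Size; uint64_t Align; };
  // Two 4-byte, 4-aligned arguments followed by one 8-byte, 8-aligned one.
  const Arg Args[] = {{4, 4}, {4, 4}, {8, 8}};
  for (const Arg &A : Args)
    std::cout << Up.AllocateStack(A.Size, A.Align) << '\t'
              << Down.AllocateStack(A.Size, A.Align) << '\n';
  // Up yields 0, 4, 8; Down yields -4, -8, -16. With negative offsets the
  // first ("fixed") argument stays at a constant distance from the stack
  // top no matter how many variadic arguments follow it.
}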
Diffstat (limited to 'llvm/include/llvm/CodeGen/CallingConvLower.h')
-rw-r--r--  llvm/include/llvm/CodeGen/CallingConvLower.h | 42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
index 3d70f5a2d8e9..a99519252cf9 100644
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -91,14 +91,14 @@ public:
return getReg(ValNo, ValVT, RegNo, LocVT, HTP, /*IsCustom=*/true);
}
- static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset,
+ static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset,
MVT LocVT, LocInfo HTP, bool IsCustom = false) {
CCValAssign Ret(HTP, ValNo, ValVT, LocVT, IsCustom);
- Ret.Data = int64_t(Offset);
+ Ret.Data = Offset;
return Ret;
}
- static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, unsigned Offset,
+ static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset,
MVT LocVT, LocInfo HTP) {
return getMem(ValNo, ValVT, Offset, LocVT, HTP, /*IsCustom=*/true);
}
@@ -112,7 +112,7 @@ public:
void convertToReg(unsigned RegNo) { Data = Register(RegNo); }
- void convertToMem(unsigned Offset) { Data = int64_t(Offset); }
+ void convertToMem(int64_t Offset) { Data = Offset; }
unsigned getValNo() const { return ValNo; }
MVT getValVT() const { return ValVT; }
@@ -124,7 +124,7 @@ public:
bool needsCustom() const { return isCustom; }
Register getLocReg() const { return std::get<Register>(Data); }
- unsigned getLocMemOffset() const { return std::get<int64_t>(Data); }
+ int64_t getLocMemOffset() const { return std::get<int64_t>(Data); }
unsigned getExtraInfo() const { return std::get<unsigned>(Data); }
MVT getLocVT() const { return LocVT; }
@@ -174,8 +174,10 @@ private:
const TargetRegisterInfo &TRI;
SmallVectorImpl<CCValAssign> &Locs;
LLVMContext &Context;
+ // True if arguments should be allocated at negative offsets.
+ bool NegativeOffsets;
- unsigned StackSize;
+ uint64_t StackSize;
Align MaxStackArgAlign;
SmallVector<uint32_t, 16> UsedRegs;
SmallVector<CCValAssign, 4> PendingLocs;
@@ -224,8 +226,9 @@ private:
unsigned InRegsParamsProcessed;
public:
- CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
- SmallVectorImpl<CCValAssign> &locs, LLVMContext &C);
+ CCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
+ SmallVectorImpl<CCValAssign> &Locs, LLVMContext &Context,
+ bool NegativeOffsets = false);
void addLoc(const CCValAssign &V) {
Locs.push_back(V);
@@ -237,12 +240,12 @@ public:
bool isVarArg() const { return IsVarArg; }
/// Returns the size of the currently allocated portion of the stack.
- unsigned getStackSize() const { return StackSize; }
+ uint64_t getStackSize() const { return StackSize; }
/// getAlignedCallFrameSize - Return the size of the call frame needed to
/// be able to store all arguments and such that the alignment requirement
/// of each of the arguments is satisfied.
- unsigned getAlignedCallFrameSize() const {
+ uint64_t getAlignedCallFrameSize() const {
return alignTo(StackSize, MaxStackArgAlign);
}
@@ -396,21 +399,26 @@ public:
/// AllocateStack - Allocate a chunk of stack space with the specified size
/// and alignment.
- unsigned AllocateStack(unsigned Size, Align Alignment) {
- StackSize = alignTo(StackSize, Alignment);
- unsigned Result = StackSize;
- StackSize += Size;
+ int64_t AllocateStack(unsigned Size, Align Alignment) {
+ int64_t Offset;
+ if (NegativeOffsets) {
+ StackSize = alignTo(StackSize + Size, Alignment);
+ Offset = -StackSize;
+ } else {
+ Offset = alignTo(StackSize, Alignment);
+ StackSize = Offset + Size;
+ }
MaxStackArgAlign = std::max(Alignment, MaxStackArgAlign);
ensureMaxAlignment(Alignment);
- return Result;
+ return Offset;
}
void ensureMaxAlignment(Align Alignment);
/// Version of AllocateStack with list of extra registers to be shadowed.
/// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
- unsigned AllocateStack(unsigned Size, Align Alignment,
- ArrayRef<MCPhysReg> ShadowRegs) {
+ int64_t AllocateStack(unsigned Size, Align Alignment,
+ ArrayRef<MCPhysReg> ShadowRegs) {
for (MCPhysReg Reg : ShadowRegs)
MarkAllocated(Reg);
return AllocateStack(Size, Alignment);