From f08c3d1d13d0fdc28dff010a88bd9f960c5ea7a9 Mon Sep 17 00:00:00 2001 From: Clement Courbet Date: Thu, 2 Nov 2017 15:02:51 +0000 Subject: [ExpandMemCmp] Split ExpandMemCmp from CodeGen into its own pass. Summary: This is mostly a noop (most of the test diffs are renamed blocks). There are a few temporary register renames (eax<->ecx) and a few blocks are shuffled around. See the discussion in PR33325 for more details. Reviewers: spatel Subscribers: mgorny Differential Revision: https://reviews.llvm.org/D39456 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317211 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 + include/llvm/LinkAllPasses.h | 1 + include/llvm/Transforms/Scalar.h | 8 +++++++- 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index c3ad8fe41af8..67a077081f77 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -128,6 +128,7 @@ void initializeEdgeBundlesPass(PassRegistry&); void initializeEfficiencySanitizerPass(PassRegistry&); void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry&); void initializeExpandISelPseudosPass(PassRegistry&); +void initializeExpandMemCmpPassPass(PassRegistry&); void initializeExpandPostRAPass(PassRegistry&); void initializeExpandReductionsPass(PassRegistry&); void initializeExternalAAWrapperPassPass(PassRegistry&); diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index 765e63926dae..ce70f53ccb04 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -180,6 +180,7 @@ namespace { (void) llvm::createReversePostOrderFunctionAttrsPass(); (void) llvm::createMergeFunctionsPass(); (void) llvm::createMergeICmpsPass(); + (void) llvm::createExpandMemCmpPass(); std::string buf; llvm::raw_string_ostream os(buf); (void) llvm::createPrintModulePass(os); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 8ef65774a93e..4b365858787e 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -422,10 +422,16 @@ Pass *createLowerGuardIntrinsicPass(); //===----------------------------------------------------------------------===// // -// MergeICmps - Merge integer comparison chains +// MergeICmps - Merge integer comparison chains into a memcmp // Pass *createMergeICmpsPass(); +//===----------------------------------------------------------------------===// +// +// ExpandMemCmp - Expand memcmp() to load/stores. +// +Pass *createExpandMemCmpPass(); + //===----------------------------------------------------------------------===// // // ValuePropagation - Propagate CFG-derived value information -- cgit v1.2.1 From c0222867301e7d88ec925dea7d306468ff3ea172 Mon Sep 17 00:00:00 2001 From: Clement Courbet Date: Thu, 2 Nov 2017 15:53:10 +0000 Subject: Revert "[ExpandMemCmp] Split ExpandMemCmp from CodeGen into its own pass." undefined reference to `llvm::TargetPassConfig::ID' on clang-ppc64le-linux-multistage This reverts commit eea333c33fa73ad225ef28607795984829f65688. 
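(For context on the ExpandMemCmp pass added and then reverted in the commits above: it rewrites calls to memcmp() with a small constant size into direct loads and an integer comparison. Below is a minimal illustrative sketch in C++ of the before/after shape for the equality-only case; it is not part of either patch, and the function names are hypothetical.)

    #include <cstdint>
    #include <cstring>

    // Before: a small, constant-size memcmp call.
    bool equal8_before(const void *a, const void *b) {
      return std::memcmp(a, b, 8) == 0;
    }

    // After: the call is expanded into two 8-byte loads and one compare.
    // std::memcpy here models the (possibly unaligned) wide loads the pass emits.
    bool equal8_after(const void *a, const void *b) {
      std::uint64_t x, y;
      std::memcpy(&x, a, 8);
      std::memcpy(&y, b, 8);
      return x == y;
    }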
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317213 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 - include/llvm/LinkAllPasses.h | 1 - include/llvm/Transforms/Scalar.h | 8 +------- 3 files changed, 1 insertion(+), 9 deletions(-) (limited to 'include') diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index 67a077081f77..c3ad8fe41af8 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -128,7 +128,6 @@ void initializeEdgeBundlesPass(PassRegistry&); void initializeEfficiencySanitizerPass(PassRegistry&); void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry&); void initializeExpandISelPseudosPass(PassRegistry&); -void initializeExpandMemCmpPassPass(PassRegistry&); void initializeExpandPostRAPass(PassRegistry&); void initializeExpandReductionsPass(PassRegistry&); void initializeExternalAAWrapperPassPass(PassRegistry&); diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index ce70f53ccb04..765e63926dae 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -180,7 +180,6 @@ namespace { (void) llvm::createReversePostOrderFunctionAttrsPass(); (void) llvm::createMergeFunctionsPass(); (void) llvm::createMergeICmpsPass(); - (void) llvm::createExpandMemCmpPass(); std::string buf; llvm::raw_string_ostream os(buf); (void) llvm::createPrintModulePass(os); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 4b365858787e..8ef65774a93e 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -422,16 +422,10 @@ Pass *createLowerGuardIntrinsicPass(); //===----------------------------------------------------------------------===// // -// MergeICmps - Merge integer comparison chains into a memcmp +// MergeICmps - Merge integer comparison chains // Pass *createMergeICmpsPass(); -//===----------------------------------------------------------------------===// -// -// ExpandMemCmp - Expand memcmp() to load/stores. -// -Pass *createExpandMemCmpPass(); - //===----------------------------------------------------------------------===// // // ValuePropagation - Propagate CFG-derived value information -- cgit v1.2.1 From 0416327f19718d0834f85aa644a2572f67a94acb Mon Sep 17 00:00:00 2001 From: Chad Rosier Date: Thu, 2 Nov 2017 17:52:27 +0000 Subject: [TargetParser][AArch64] Reorder enum to preserve 5.0.0 libLLVM ABI. This is required for backporting r311659 to the 5.0.1 release. PR35060 Differential Revision: https://reviews.llvm.org/D39558 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317222 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Support/TargetParser.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/llvm/Support/TargetParser.h b/include/llvm/Support/TargetParser.h index 6b56a635ff05..b3f91433bd93 100644 --- a/include/llvm/Support/TargetParser.h +++ b/include/llvm/Support/TargetParser.h @@ -167,10 +167,10 @@ enum ArchExtKind : unsigned { AEK_PROFILE = 1 << 6, AEK_RAS = 1 << 7, AEK_LSE = 1 << 8, - AEK_RDM = 1 << 9, - AEK_SVE = 1 << 10, - AEK_DOTPROD = 1 << 11, - AEK_RCPC = 1 << 12 + AEK_SVE = 1 << 9, + AEK_DOTPROD = 1 << 10, + AEK_RCPC = 1 << 11, + AEK_RDM = 1 << 12 }; StringRef getCanonicalArchName(StringRef Arch); -- cgit v1.2.1 From 4c88213d82fbff2542c5aaa8ffb7b0d93c66b6cb Mon Sep 17 00:00:00 2001 From: Mitch Phillips Date: Thu, 2 Nov 2017 18:04:44 +0000 Subject: Fixed line length style issue. 
Reviewers: zturner Subscribers: llvm-commits Differential Revision: https://reviews.llvm.org/D39395 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317223 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Support/MemoryBuffer.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h index 73f0251a6b6e..59c93f15d7b8 100644 --- a/include/llvm/Support/MemoryBuffer.h +++ b/include/llvm/Support/MemoryBuffer.h @@ -136,7 +136,8 @@ public: /// Map a subrange of the specified file as a MemoryBuffer. static ErrorOr> - getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset, bool IsVolatile = false); + getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset, + bool IsVolatile = false); //===--------------------------------------------------------------------===// // Provided for performance analysis. -- cgit v1.2.1 From 2e63034efd79807891a4d201daeb434c2d26c609 Mon Sep 17 00:00:00 2001 From: Adrian Prantl Date: Thu, 2 Nov 2017 20:58:58 +0000 Subject: Add missing header guards. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317267 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm-c/DebugInfo.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/llvm-c/DebugInfo.h b/include/llvm-c/DebugInfo.h index 15f6b57d8831..2c2fdbdf173b 100644 --- a/include/llvm-c/DebugInfo.h +++ b/include/llvm-c/DebugInfo.h @@ -14,6 +14,9 @@ /// //===----------------------------------------------------------------------===// +#ifndef LLVM_C_DEBUGINFO_H +#define LLVM_C_DEBUGINFO_H + #include "llvm-c/Core.h" #ifdef __cplusplus @@ -200,3 +203,5 @@ LLVMDIBuilderCreateDebugLocation(LLVMContextRef Ctx, unsigned Line, #ifdef __cplusplus } // end extern "C" #endif + +#endif -- cgit v1.2.1 From dc666ea9df629f7b5ec1506993f15d406a52acc6 Mon Sep 17 00:00:00 2001 From: Adrian Prantl Date: Thu, 2 Nov 2017 21:35:37 +0000 Subject: Clean up comments in include/llvm-c/DebugInfo.h Patch by Harlan Haskins! Differential Revision: https://reviews.llvm.org/D39568 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317271 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm-c/DebugInfo.h | 143 ++++++++++++++++++++++++++------------------- 1 file changed, 84 insertions(+), 59 deletions(-) (limited to 'include') diff --git a/include/llvm-c/DebugInfo.h b/include/llvm-c/DebugInfo.h index 2c2fdbdf173b..a27b351577a9 100644 --- a/include/llvm-c/DebugInfo.h +++ b/include/llvm-c/DebugInfo.h @@ -23,7 +23,9 @@ extern "C" { #endif -/// Debug info flags. +/** + * Debug info flags. + */ typedef enum { LLVMDIFlagZero = 0, LLVMDIFlagPrivate = 1, @@ -58,7 +60,9 @@ typedef enum { LLVMDIFlagVirtualInheritance } LLVMDIFlags; -/// Source languages known by DWARF. +/** + * Source languages known by DWARF. + */ typedef enum { LLVMDWARFSourceLanguageC89, LLVMDWARFSourceLanguageC, @@ -106,68 +110,85 @@ typedef enum { LLVMDWARFSourceLanguageBORLAND_Delphi } LLVMDWARFSourceLanguage; -/// The amount of debug information to emit. +/** + * The amount of debug information to emit. + */ typedef enum { LLVMDWARFEmissionNone = 0, LLVMDWARFEmissionFull, LLVMDWARFEmissionLineTablesOnly } LLVMDWARFEmissionKind; -/// The current debug metadata version number. +/** + * The current debug metadata version number. + */ unsigned LLVMDebugMetadataVersion(void); -/// The version of debug metadata that's present in the provided \c Module. 
+/** + * The version of debug metadata that's present in the provided \c Module. + */ unsigned LLVMGetModuleDebugMetadataVersion(LLVMModuleRef Module); -/// Strip debug info in the module if it exists. -/// -/// To do this, we remove all calls to the debugger intrinsics and any named -/// metadata for debugging. We also remove debug locations for instructions. -/// Return true if module is modified. +/** + * Strip debug info in the module if it exists. + * To do this, we remove all calls to the debugger intrinsics and any named + * metadata for debugging. We also remove debug locations for instructions. + * Return true if module is modified. + */ LLVMBool LLVMStripModuleDebugInfo(LLVMModuleRef Module); -/// Construct a builder for a module, and do not allow for unresolved nodes -/// attached to the module. +/** + * Construct a builder for a module, and do not allow for unresolved nodes + * attached to the module. + */ LLVMDIBuilderRef LLVMCreateDIBuilderDisallowUnresolved(LLVMModuleRef M); -/// Construct a builder for a module and collect unresolved nodes attached -/// to the module in order to resolve cycles during a call to -/// \c LLVMDIBuilderFinalize. +/** + * Construct a builder for a module and collect unresolved nodes attached + * to the module in order to resolve cycles during a call to + * \c LLVMDIBuilderFinalize. + */ LLVMDIBuilderRef LLVMCreateDIBuilder(LLVMModuleRef M); -/// Deallocates the DIBuilder and everything it owns. -/// @note You must call \c LLVMDIBuilderFinalize before this +/** + * Deallocates the DIBuilder and everything it owns. + * @note You must call \c LLVMDIBuilderFinalize before this + */ void LLVMDisposeDIBuilder(LLVMDIBuilderRef Builder); -/// Construct any deferred debug info descriptors. +/** + * Construct any deferred debug info descriptors. + */ void LLVMDIBuilderFinalize(LLVMDIBuilderRef Builder); -/// A CompileUnit provides an anchor for all debugging -/// information generated during this instance of compilation. -/// \param Lang Source programming language, eg. -/// \c LLVMDWARFSourceLanguageC99 -/// \param FileRef File info. -/// \param Producer Identify the producer of debugging information -/// and code. Usually this is a compiler -/// version string. -/// \param ProducerLen The length of the C string passed to \c Producer. -/// \param isOptimized A boolean flag which indicates whether optimization -/// is enabled or not. -/// \param Flags This string lists command line options. This -/// string is directly embedded in debug info -/// output which may be used by a tool -/// analyzing generated debugging information. -/// \param FlagsLen The length of the C string passed to \c Flags. -/// \param RuntimeVer This indicates runtime version for languages like -/// Objective-C. -/// \param SplitName The name of the file that we'll split debug info -/// out into. -/// \param SplitNameLen The length of the C string passed to \c SplitName. -/// \param Kind The kind of debug information to generate. -/// \param DWOId The DWOId if this is a split skeleton compile unit. -/// \param SplitDebugInlining Whether to emit inline debug info. -/// \param DebugInfoForProfiling Whether to emit extra debug info for -/// profile collection. +/** + * A CompileUnit provides an anchor for all debugging + * information generated during this instance of compilation. + * \param Lang Source programming language, eg. + * \c LLVMDWARFSourceLanguageC99 + * \param FileRef File info. + * \param Producer Identify the producer of debugging information + * and code. 
Usually this is a compiler + * version string. + * \param ProducerLen The length of the C string passed to \c Producer. + * \param isOptimized A boolean flag which indicates whether optimization + * is enabled or not. + * \param Flags This string lists command line options. This + * string is directly embedded in debug info + * output which may be used by a tool + * analyzing generated debugging information. + * \param FlagsLen The length of the C string passed to \c Flags. + * \param RuntimeVer This indicates runtime version for languages like + * Objective-C. + * \param SplitName The name of the file that we'll split debug info + * out into. + * \param SplitNameLen The length of the C string passed to \c SplitName. + * \param Kind The kind of debug information to generate. + * \param DWOId The DWOId if this is a split skeleton compile unit. + * \param SplitDebugInlining Whether to emit inline debug info. + * \param DebugInfoForProfiling Whether to emit extra debug info for + * profile collection. + */ LLVMMetadataRef LLVMDIBuilderCreateCompileUnit( LLVMDIBuilderRef Builder, LLVMDWARFSourceLanguage Lang, LLVMMetadataRef FileRef, const char *Producer, size_t ProducerLen, @@ -176,32 +197,36 @@ LLVMMetadataRef LLVMDIBuilderCreateCompileUnit( LLVMDWARFEmissionKind Kind, unsigned DWOId, LLVMBool SplitDebugInlining, LLVMBool DebugInfoForProfiling); -/// Create a file descriptor to hold debugging information for a file. -/// \param Builder The DIBuilder. -/// \param Filename File name. -/// \param FilenameLen The length of the C string passed to \c Filename. -/// \param Directory Directory. -/// \param DirectoryLen The length of the C string passed to \c Directory. +/** + * Create a file descriptor to hold debugging information for a file. + * \param Builder The DIBuilder. + * \param Filename File name. + * \param FilenameLen The length of the C string passed to \c Filename. + * \param Directory Directory. + * \param DirectoryLen The length of the C string passed to \c Directory. + */ LLVMMetadataRef LLVMDIBuilderCreateFile(LLVMDIBuilderRef Builder, const char *Filename, size_t FilenameLen, const char *Directory, size_t DirectoryLen); -/// Creates a new DebugLocation that describes a source location. -/// \param Line The line in the source file. -/// \param Column The column in the source file. -/// \param Scope The scope in which the location resides. -/// \param InlinedAt The scope where this location was inlined, if at all. -/// (optional). -/// \note If the item to which this location is attached cannot be -/// attributed to a source line, pass 0 for the line and column. +/** + * Creates a new DebugLocation that describes a source location. + * \param Line The line in the source file. + * \param Column The column in the source file. + * \param Scope The scope in which the location resides. + * \param InlinedAt The scope where this location was inlined, if at all. + * (optional). + * \note If the item to which this location is attached cannot be + * attributed to a source line, pass 0 for the line and column. + */ LLVMMetadataRef LLVMDIBuilderCreateDebugLocation(LLVMContextRef Ctx, unsigned Line, unsigned Column, LLVMMetadataRef Scope, LLVMMetadataRef InlinedAt); #ifdef __cplusplus -} // end extern "C" +} /* end extern "C" */ #endif #endif -- cgit v1.2.1 From dd33e177dd838793692d7a291dc5552e30642842 Mon Sep 17 00:00:00 2001 From: Hiroshi Yamauchi Date: Thu, 2 Nov 2017 22:26:51 +0000 Subject: Irreducible loop metadata for more accurate block frequency under PGO. 
Summary: Currently the block frequency analysis is an approximation for irreducible loops. The new irreducible loop metadata is used to annotate the irreducible loop headers with their header weights based on the PGO profile (currently this is approximated to be evenly weighted) and to help improve the accuracy of the block frequency analysis for irreducible loops. This patch is a basic support for this. Reviewers: davidxl Reviewed By: davidxl Subscribers: mehdi_amini, llvm-commits, eraman Differential Revision: https://reviews.llvm.org/D39028 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317278 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Analysis/BlockFrequencyInfo.h | 4 ++ include/llvm/Analysis/BlockFrequencyInfoImpl.h | 49 +++++++++++++++++++++--- include/llvm/CodeGen/MachineBasicBlock.h | 10 +++++ include/llvm/CodeGen/MachineBlockFrequencyInfo.h | 2 + include/llvm/IR/BasicBlock.h | 2 + include/llvm/IR/LLVMContext.h | 1 + include/llvm/IR/MDBuilder.h | 3 ++ include/llvm/Transforms/PGOInstrumentation.h | 2 + 8 files changed, 67 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/llvm/Analysis/BlockFrequencyInfo.h b/include/llvm/Analysis/BlockFrequencyInfo.h index d663b09d5cfe..89370cbeeea1 100644 --- a/include/llvm/Analysis/BlockFrequencyInfo.h +++ b/include/llvm/Analysis/BlockFrequencyInfo.h @@ -75,6 +75,10 @@ public: /// the enclosing function's count (if available) and returns the value. Optional getProfileCountFromFreq(uint64_t Freq) const; + /// \brief Returns true if \p BB is an irreducible loop header + /// block. Otherwise false. + bool isIrrLoopHeader(const BasicBlock *BB); + // Set the frequency of the given basic block. void setBlockFreq(const BasicBlock *BB, uint64_t Freq); diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h index 7f166f4a6465..7b916e3653b8 100644 --- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h +++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h @@ -20,6 +20,7 @@ #include "llvm/ADT/Optional.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/SparseBitVector.h" #include "llvm/ADT/Twine.h" #include "llvm/ADT/iterator_range.h" #include "llvm/IR/BasicBlock.h" @@ -414,6 +415,10 @@ public: /// \brief Data about each block. This is used downstream. std::vector Freqs; + /// \brief Whether each block is an irreducible loop header. + /// This is used downstream. + SparseBitVector<> IsIrrLoopHeader; + /// \brief Loop data: see initializeLoops(). std::vector Working; @@ -492,6 +497,8 @@ public: /// the backedges going into each of the loop headers. void adjustLoopHeaderMass(LoopData &Loop); + void distributeIrrLoopHeaderMass(Distribution &Dist); + /// \brief Package up a loop. 
void packageLoop(LoopData &Loop); @@ -520,6 +527,7 @@ public: const BlockNode &Node) const; Optional getProfileCountFromFreq(const Function &F, uint64_t Freq) const; + bool isIrrLoopHeader(const BlockNode &Node); void setBlockFreq(const BlockNode &Node, uint64_t Freq); @@ -973,6 +981,10 @@ public: return BlockFrequencyInfoImplBase::getProfileCountFromFreq(F, Freq); } + bool isIrrLoopHeader(const BlockT *BB) { + return BlockFrequencyInfoImplBase::isIrrLoopHeader(getNode(BB)); + } + void setBlockFreq(const BlockT *BB, uint64_t Freq); Scaled64 getFloatingBlockFreq(const BlockT *BB) const { @@ -1140,17 +1152,39 @@ bool BlockFrequencyInfoImpl::computeMassInLoop(LoopData &Loop) { DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n"); if (Loop.isIrreducible()) { - BlockMass Remaining = BlockMass::getFull(); + DEBUG(dbgs() << "isIrreducible = true\n"); + Distribution Dist; + unsigned NumHeadersWithWeight = 0; for (uint32_t H = 0; H < Loop.NumHeaders; ++H) { - auto &Mass = Working[Loop.Nodes[H].Index].getMass(); - Mass = Remaining * BranchProbability(1, Loop.NumHeaders - H); - Remaining -= Mass; + auto &HeaderNode = Loop.Nodes[H]; + const BlockT *Block = getBlock(HeaderNode); + IsIrrLoopHeader.set(Loop.Nodes[H].Index); + Optional HeaderWeight = Block->getIrrLoopHeaderWeight(); + if (!HeaderWeight) + continue; + DEBUG(dbgs() << getBlockName(HeaderNode) + << " has irr loop header weight " << HeaderWeight.getValue() + << "\n"); + NumHeadersWithWeight++; + uint64_t HeaderWeightValue = HeaderWeight.getValue(); + if (HeaderWeightValue) + Dist.addLocal(HeaderNode, HeaderWeightValue); } + if (NumHeadersWithWeight != Loop.NumHeaders) { + // Not all headers have a weight metadata. Distribute weight evenly. + Dist = Distribution(); + for (uint32_t H = 0; H < Loop.NumHeaders; ++H) { + auto &HeaderNode = Loop.Nodes[H]; + Dist.addLocal(HeaderNode, 1); + } + } + distributeIrrLoopHeaderMass(Dist); for (const BlockNode &M : Loop.Nodes) if (!propagateMassToSuccessors(&Loop, M)) llvm_unreachable("unhandled irreducible control flow"); - - adjustLoopHeaderMass(Loop); + if (NumHeadersWithWeight != Loop.NumHeaders) + // Not all headers have a weight metadata. Adjust header mass. + adjustLoopHeaderMass(Loop); } else { Working[Loop.getHeader().Index].getMass() = BlockMass::getFull(); if (!propagateMassToSuccessors(&Loop, Loop.getHeader())) @@ -1285,6 +1319,9 @@ raw_ostream &BlockFrequencyInfoImpl::print(raw_ostream &OS) const { BlockFrequencyInfoImplBase::getBlockProfileCount( *F->getFunction(), getNode(&BB))) OS << ", count = " << ProfileCount.getValue(); + if (Optional IrrLoopHeaderWeight = + BB.getIrrLoopHeaderWeight()) + OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.getValue(); OS << "\n"; } diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h index 51a0d96deda5..0f5b04d90459 100644 --- a/include/llvm/CodeGen/MachineBasicBlock.h +++ b/include/llvm/CodeGen/MachineBasicBlock.h @@ -97,6 +97,8 @@ private: using const_probability_iterator = std::vector::const_iterator; + Optional IrrLoopHeaderWeight; + /// Keep track of the physical registers that are livein of the basicblock. using LiveInVector = std::vector; LiveInVector LiveIns; @@ -729,6 +731,14 @@ public: /// Return the MCSymbol for this basic block. 
MCSymbol *getSymbol() const; + Optional getIrrLoopHeaderWeight() const { + return IrrLoopHeaderWeight; + } + + void setIrrLoopHeaderWeight(uint64_t Weight) { + IrrLoopHeaderWeight = Weight; + } + private: /// Return probability iterator corresponding to the I successor iterator. probability_iterator getProbabilityIterator(succ_iterator I); diff --git a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h index cba79c818a76..5b4b99ca0a5d 100644 --- a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h +++ b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h @@ -62,6 +62,8 @@ public: Optional getBlockProfileCount(const MachineBasicBlock *MBB) const; Optional getProfileCountFromFreq(uint64_t Freq) const; + bool isIrrLoopHeader(const MachineBasicBlock *MBB); + const MachineFunction *getFunction() const; const MachineBranchProbabilityInfo *getMBPI() const; void view(const Twine &Name, bool isSimple = true) const; diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h index 6714f2c97473..77cfc9776df0 100644 --- a/include/llvm/IR/BasicBlock.h +++ b/include/llvm/IR/BasicBlock.h @@ -398,6 +398,8 @@ public: /// \brief Return true if it is legal to hoist instructions into this block. bool isLegalToHoistInto() const; + Optional getIrrLoopHeaderWeight() const; + private: /// \brief Increment the internal refcount of the number of BlockAddresses /// referencing this BasicBlock by \p Amt. diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h index 9e935823c775..a95634d32c21 100644 --- a/include/llvm/IR/LLVMContext.h +++ b/include/llvm/IR/LLVMContext.h @@ -101,6 +101,7 @@ public: MD_absolute_symbol = 21, // "absolute_symbol" MD_associated = 22, // "associated" MD_callees = 23, // "callees" + MD_irr_loop = 24, // "irr_loop" }; /// Known operand bundle tag IDs, which always have the same value. All diff --git a/include/llvm/IR/MDBuilder.h b/include/llvm/IR/MDBuilder.h index d679cef95b68..15c1b9cb60ef 100644 --- a/include/llvm/IR/MDBuilder.h +++ b/include/llvm/IR/MDBuilder.h @@ -173,6 +173,9 @@ public: /// base type, access type and offset relative to the base type. MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType, uint64_t Offset, bool IsConstant = false); + + /// \brief Return metadata containing an irreducible loop header weight. + MDNode *createIrrLoopHeaderWeight(uint64_t Weight); }; } // end namespace llvm diff --git a/include/llvm/Transforms/PGOInstrumentation.h b/include/llvm/Transforms/PGOInstrumentation.h index fa7a68624ec8..c2cc76c422da 100644 --- a/include/llvm/Transforms/PGOInstrumentation.h +++ b/include/llvm/Transforms/PGOInstrumentation.h @@ -68,6 +68,8 @@ public: void setProfMetadata(Module *M, Instruction *TI, ArrayRef EdgeCounts, uint64_t MaxCount); +void setIrrLoopHeaderMetadata(Module *M, Instruction *TI, uint64_t Count); + } // end namespace llvm #endif // LLVM_TRANSFORMS_PGOINSTRUMENTATION_H -- cgit v1.2.1 From 0ae3f32f5642942bbc7ebd2f40e1b218eee51fef Mon Sep 17 00:00:00 2001 From: Puyan Lotfi Date: Thu, 2 Nov 2017 23:37:32 +0000 Subject: mir-canon: First commit. mir-canon (MIRCanonicalizerPass) is a pass designed to reorder instructions and rename operands so that two similar programs will diff more cleanly after being run through mir-canon than they would otherwise. This project is still a work in progress and there are ideas still being discussed for improving diff quality. 
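(A small sketch of how a host tool might make the new MIRCanonicalizerPass available, using only the initializeMIRCanonicalizerPass() declaration this patch adds to InitializePasses.h; the wrapper function name is hypothetical.)

    #include "llvm/InitializePasses.h"
    #include "llvm/PassRegistry.h"

    // Register the canonicalizer with the global pass registry so that
    // pass-lookup machinery (e.g. llc's -run-pass) can find it by name.
    void registerMIRCanonicalizer() {
      llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry();
      llvm::initializeMIRCanonicalizerPass(Registry);
    }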
M include/llvm/InitializePasses.h M lib/CodeGen/CMakeLists.txt M lib/CodeGen/CodeGen.cpp A lib/CodeGen/MIRCanonicalizerPass.cpp git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317285 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index c3ad8fe41af8..8c63ab0284df 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -377,6 +377,7 @@ void initializeWinEHPreparePass(PassRegistry&); void initializeWriteBitcodePassPass(PassRegistry&); void initializeWriteThinLTOBitcodePass(PassRegistry&); void initializeXRayInstrumentationPass(PassRegistry&); +void initializeMIRCanonicalizerPass(PassRegistry &); } // end namespace llvm -- cgit v1.2.1 From 3d456013b6bbf241696e8bf1570502412e62a63c Mon Sep 17 00:00:00 2001 From: Clement Courbet Date: Fri, 3 Nov 2017 12:12:27 +0000 Subject: re-land [ExpandMemCmp] Split ExpandMemCmp from CodeGen into its own pass." Fix undefined references: ExpandMemCmp belongs to CodeGen/, not Scalar/. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317318 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/Passes.h | 3 +++ include/llvm/InitializePasses.h | 1 + include/llvm/LinkAllPasses.h | 1 + include/llvm/Transforms/Scalar.h | 2 +- 4 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index 8e6b1570e4a3..c106ff6cdfef 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -417,6 +417,9 @@ namespace llvm { /// shuffles. FunctionPass *createExpandReductionsPass(); + // This pass expands memcmp() to load/stores. 
+ FunctionPass *createExpandMemCmpPass(); + } // End llvm namespace #endif diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index 8c63ab0284df..b8183d1c8e2f 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -128,6 +128,7 @@ void initializeEdgeBundlesPass(PassRegistry&); void initializeEfficiencySanitizerPass(PassRegistry&); void initializeEliminateAvailableExternallyLegacyPassPass(PassRegistry&); void initializeExpandISelPseudosPass(PassRegistry&); +void initializeExpandMemCmpPassPass(PassRegistry&); void initializeExpandPostRAPass(PassRegistry&); void initializeExpandReductionsPass(PassRegistry&); void initializeExternalAAWrapperPassPass(PassRegistry&); diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index 765e63926dae..ce70f53ccb04 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -180,6 +180,7 @@ namespace { (void) llvm::createReversePostOrderFunctionAttrsPass(); (void) llvm::createMergeFunctionsPass(); (void) llvm::createMergeICmpsPass(); + (void) llvm::createExpandMemCmpPass(); std::string buf; llvm::raw_string_ostream os(buf); (void) llvm::createPrintModulePass(os); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 8ef65774a93e..a78c897683fc 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -422,7 +422,7 @@ Pass *createLowerGuardIntrinsicPass(); //===----------------------------------------------------------------------===// // -// MergeICmps - Merge integer comparison chains +// MergeICmps - Merge integer comparison chains into a memcmp // Pass *createMergeICmpsPass(); -- cgit v1.2.1 From 9e5188ca177ad10813f233bd693a57d73a90b86b Mon Sep 17 00:00:00 2001 From: Mikael Holmen Date: Fri, 3 Nov 2017 14:15:08 +0000 Subject: [ADCE] Use MapVector for BlockInfo to make iteration order deterministic Summary: Also added a reserve() method to MapVector since we want to use that from ADCE. DenseMap does not provide deterministic iteration order so with that we will handle the members of BlockInfo in random order, eventually leading to random order of the blocks in the predecessor lists. Without this change, I get the same predecessor order in about 90% of the time when I compile a certain reproducer and in 10% I get a different one. No idea how to make a proper test case for this. Reviewers: kuhar, david2050 Reviewed By: kuhar Subscribers: llvm-commits Differential Revision: https://reviews.llvm.org/D39593 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317323 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/ADT/MapVector.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/llvm/ADT/MapVector.h b/include/llvm/ADT/MapVector.h index 26a555ee1d3b..3d78f4b203c8 100644 --- a/include/llvm/ADT/MapVector.h +++ b/include/llvm/ADT/MapVector.h @@ -56,6 +56,13 @@ public: size_type size() const { return Vector.size(); } + /// Grow the MapVector so that it can contain at least \p NumEntries items + /// before resizing again. 
+ void reserve(size_type NumEntries) { + Map.reserve(NumEntries); + Vector.reserve(NumEntries); + } + iterator begin() { return Vector.begin(); } const_iterator begin() const { return Vector.begin(); } iterator end() { return Vector.end(); } -- cgit v1.2.1 From 1b91c5e8aad019b3b3649db6c496b74739b4e5d2 Mon Sep 17 00:00:00 2001 From: Jun Bum Lim Date: Fri, 3 Nov 2017 19:01:57 +0000 Subject: Add CallSiteSplitting pass Summary: This change add a pass which tries to split a call-site to pass more constrained arguments if its argument is predicated in the control flow so that we can expose better context to the later passes (e.g, inliner, jump threading, or IPA-CP based function cloning, etc.). As of now we support two cases : 1) If a call site is dominated by an OR condition and if any of its arguments are predicated on this OR condition, try to split the condition with more constrained arguments. For example, in the code below, we try to split the call site since we can predicate the argument (ptr) based on the OR condition. Split from : if (!ptr || c) callee(ptr); to : if (!ptr) callee(null ptr) // set the known constant value else if (c) callee(nonnull ptr) // set non-null attribute in the argument 2) We can also split a call-site based on constant incoming values of a PHI For example, from : BB0: %c = icmp eq i32 %i1, %i2 br i1 %c, label %BB2, label %BB1 BB1: br label %BB2 BB2: %p = phi i32 [ 0, %BB0 ], [ 1, %BB1 ] call void @bar(i32 %p) to BB0: %c = icmp eq i32 %i1, %i2 br i1 %c, label %BB2-split0, label %BB1 BB1: br label %BB2-split1 BB2-split0: call void @bar(i32 0) br label %BB2 BB2-split1: call void @bar(i32 1) br label %BB2 BB2: %p = phi i32 [ 0, %BB2-split0 ], [ 1, %BB2-split1 ] Reviewers: davidxl, huntergr, chandlerc, mcrosier, eraman, davide Reviewed By: davidxl Subscribers: sdesmalen, ashutosh.nema, fhahn, mssimpso, aemerson, mgorny, mehdi_amini, kristof.beyls, llvm-commits Differential Revision: https://reviews.llvm.org/D39137 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317351 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 + include/llvm/Transforms/Scalar.h | 8 ++++++ include/llvm/Transforms/Scalar/CallSiteSplitting.h | 29 ++++++++++++++++++++++ 3 files changed, 38 insertions(+) create mode 100644 include/llvm/Transforms/Scalar/CallSiteSplitting.h (limited to 'include') diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index b8183d1c8e2f..9cdb49330ae1 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -80,6 +80,7 @@ void initializeBranchFolderPassPass(PassRegistry&); void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&); void initializeBranchRelaxationPass(PassRegistry&); void initializeBreakCriticalEdgesPass(PassRegistry&); +void initializeCallSiteSplittingLegacyPassPass(PassRegistry&); void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&); void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&); void initializeCFGPrinterLegacyPassPass(PassRegistry&); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index a78c897683fc..0cf1115dc973 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -73,6 +73,14 @@ FunctionPass *createDeadCodeEliminationPass(); // FunctionPass *createDeadStoreEliminationPass(); + +//===----------------------------------------------------------------------===// +// +// CallSiteSplitting - This pass split call-site based on its known argument +// values. 
+FunctionPass *createCallSiteSplittingPass(); + + //===----------------------------------------------------------------------===// // // AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm. This diff --git a/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/include/llvm/Transforms/Scalar/CallSiteSplitting.h new file mode 100644 index 000000000000..5ab951a49f2c --- /dev/null +++ b/include/llvm/Transforms/Scalar/CallSiteSplitting.h @@ -0,0 +1,29 @@ +//===- CallSiteSplitting..h - Callsite Splitting ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H +#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H + +#include "llvm/ADT/SetVector.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/PassManager.h" +#include "llvm/Support/Compiler.h" +#include + +namespace llvm { + +struct CallSiteSplittingPass : PassInfoMixin { + /// \brief Run the pass over the function. + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); +}; +} // end namespace llvm + +#endif // LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H -- cgit v1.2.1 From c86c85f907f2513916a2cbd184c8a02d7c64d5a2 Mon Sep 17 00:00:00 2001 From: Jun Bum Lim Date: Fri, 3 Nov 2017 19:17:11 +0000 Subject: Revert "Add CallSiteSplitting pass" Revert due to Buildbot failure. This reverts commit r317351. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317353 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 - include/llvm/Transforms/Scalar.h | 8 ------ include/llvm/Transforms/Scalar/CallSiteSplitting.h | 29 ---------------------- 3 files changed, 38 deletions(-) delete mode 100644 include/llvm/Transforms/Scalar/CallSiteSplitting.h (limited to 'include') diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index 9cdb49330ae1..b8183d1c8e2f 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -80,7 +80,6 @@ void initializeBranchFolderPassPass(PassRegistry&); void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&); void initializeBranchRelaxationPass(PassRegistry&); void initializeBreakCriticalEdgesPass(PassRegistry&); -void initializeCallSiteSplittingLegacyPassPass(PassRegistry&); void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&); void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&); void initializeCFGPrinterLegacyPassPass(PassRegistry&); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 0cf1115dc973..a78c897683fc 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -73,14 +73,6 @@ FunctionPass *createDeadCodeEliminationPass(); // FunctionPass *createDeadStoreEliminationPass(); - -//===----------------------------------------------------------------------===// -// -// CallSiteSplitting - This pass split call-site based on its known argument -// values. -FunctionPass *createCallSiteSplittingPass(); - - //===----------------------------------------------------------------------===// // // AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm. 
This diff --git a/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/include/llvm/Transforms/Scalar/CallSiteSplitting.h deleted file mode 100644 index 5ab951a49f2c..000000000000 --- a/include/llvm/Transforms/Scalar/CallSiteSplitting.h +++ /dev/null @@ -1,29 +0,0 @@ -//===- CallSiteSplitting..h - Callsite Splitting ------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H -#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H - -#include "llvm/ADT/SetVector.h" -#include "llvm/Analysis/AssumptionCache.h" -#include "llvm/IR/Dominators.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/PassManager.h" -#include "llvm/Support/Compiler.h" -#include - -namespace llvm { - -struct CallSiteSplittingPass : PassInfoMixin { - /// \brief Run the pass over the function. - PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); -}; -} // end namespace llvm - -#endif // LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H -- cgit v1.2.1 From bdc30c02fb2f7dceab4499c871fc00aa9b7543b9 Mon Sep 17 00:00:00 2001 From: Aaron Ballman Date: Fri, 3 Nov 2017 20:01:25 +0000 Subject: Add llvm::for_each as a range-based extensions to and make use of it in some cases where it is a more clear alternative to std::for_each. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317356 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/ADT/STLExtras.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h index 3ec9dfe5de0a..c42d976f4678 100644 --- a/include/llvm/ADT/STLExtras.h +++ b/include/llvm/ADT/STLExtras.h @@ -813,6 +813,13 @@ void DeleteContainerSeconds(Container &C) { C.clear(); } +/// Provide wrappers to std::for_each which take ranges instead of having to +/// pass begin/end explicitly. +template +UnaryPredicate for_each(R &&Range, UnaryPredicate P) { + return std::for_each(std::begin(Range), std::end(Range), P); +} + /// Provide wrappers to std::all_of which take ranges instead of having to pass /// begin/end explicitly. template -- cgit v1.2.1 From 2619256bd715b06c947e862f5f53511795dae1a3 Mon Sep 17 00:00:00 2001 From: Aaron Ballman Date: Fri, 3 Nov 2017 20:05:51 +0000 Subject: Correcting some CRLFs that snuck in with my previous commit; NFC. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317357 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/ADT/STLExtras.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h index c42d976f4678..1be5bf91385b 100644 --- a/include/llvm/ADT/STLExtras.h +++ b/include/llvm/ADT/STLExtras.h @@ -813,12 +813,12 @@ void DeleteContainerSeconds(Container &C) { C.clear(); } -/// Provide wrappers to std::for_each which take ranges instead of having to -/// pass begin/end explicitly. -template -UnaryPredicate for_each(R &&Range, UnaryPredicate P) { - return std::for_each(std::begin(Range), std::end(Range), P); -} +/// Provide wrappers to std::for_each which take ranges instead of having to +/// pass begin/end explicitly. 
+template +UnaryPredicate for_each(R &&Range, UnaryPredicate P) { + return std::for_each(std::begin(Range), std::end(Range), P); +} /// Provide wrappers to std::all_of which take ranges instead of having to pass /// begin/end explicitly. -- cgit v1.2.1 From ceb5b1b4346ad8e1b2f693199153a5e68c784077 Mon Sep 17 00:00:00 2001 From: David Blaikie Date: Fri, 3 Nov 2017 20:24:19 +0000 Subject: Modularize: Include some required headers DenseMaps require the definition of a type to be available when using a pointer to that type as a key to know how many bits are available for tombstone/etc. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317360 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/StackMaps.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/CodeGen/StackMaps.h b/include/llvm/CodeGen/StackMaps.h index 8263946ed928..4407114d2741 100644 --- a/include/llvm/CodeGen/StackMaps.h +++ b/include/llvm/CodeGen/StackMaps.h @@ -14,6 +14,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/IR/CallingConv.h" +#include "llvm/MC/MCSymbol.h" #include "llvm/Support/Debug.h" #include #include @@ -25,7 +26,6 @@ namespace llvm { class AsmPrinter; class MCExpr; class MCStreamer; -class MCSymbol; class raw_ostream; class TargetRegisterInfo; -- cgit v1.2.1 From f4beb75be0ff7db0d9c80bbb0efddcd20e7b1d59 Mon Sep 17 00:00:00 2001 From: Jun Bum Lim Date: Fri, 3 Nov 2017 20:41:16 +0000 Subject: Recommit r317351 : Add CallSiteSplitting pass This recommit r317351 after fixing a buildbot failure. Original commit message: Summary: This change add a pass which tries to split a call-site to pass more constrained arguments if its argument is predicated in the control flow so that we can expose better context to the later passes (e.g, inliner, jump threading, or IPA-CP based function cloning, etc.). As of now we support two cases : 1) If a call site is dominated by an OR condition and if any of its arguments are predicated on this OR condition, try to split the condition with more constrained arguments. For example, in the code below, we try to split the call site since we can predicate the argument (ptr) based on the OR condition. 
Split from : if (!ptr || c) callee(ptr); to : if (!ptr) callee(null ptr) // set the known constant value else if (c) callee(nonnull ptr) // set non-null attribute in the argument 2) We can also split a call-site based on constant incoming values of a PHI For example, from : BB0: %c = icmp eq i32 %i1, %i2 br i1 %c, label %BB2, label %BB1 BB1: br label %BB2 BB2: %p = phi i32 [ 0, %BB0 ], [ 1, %BB1 ] call void @bar(i32 %p) to BB0: %c = icmp eq i32 %i1, %i2 br i1 %c, label %BB2-split0, label %BB1 BB1: br label %BB2-split1 BB2-split0: call void @bar(i32 0) br label %BB2 BB2-split1: call void @bar(i32 1) br label %BB2 BB2: %p = phi i32 [ 0, %BB2-split0 ], [ 1, %BB2-split1 ] git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317362 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 + include/llvm/Transforms/Scalar.h | 8 ++++++ include/llvm/Transforms/Scalar/CallSiteSplitting.h | 29 ++++++++++++++++++++++ 3 files changed, 38 insertions(+) create mode 100644 include/llvm/Transforms/Scalar/CallSiteSplitting.h (limited to 'include') diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index b8183d1c8e2f..9cdb49330ae1 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -80,6 +80,7 @@ void initializeBranchFolderPassPass(PassRegistry&); void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&); void initializeBranchRelaxationPass(PassRegistry&); void initializeBreakCriticalEdgesPass(PassRegistry&); +void initializeCallSiteSplittingLegacyPassPass(PassRegistry&); void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&); void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&); void initializeCFGPrinterLegacyPassPass(PassRegistry&); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index a78c897683fc..0cf1115dc973 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -73,6 +73,14 @@ FunctionPass *createDeadCodeEliminationPass(); // FunctionPass *createDeadStoreEliminationPass(); + +//===----------------------------------------------------------------------===// +// +// CallSiteSplitting - This pass split call-site based on its known argument +// values. +FunctionPass *createCallSiteSplittingPass(); + + //===----------------------------------------------------------------------===// // // AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm. This diff --git a/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/include/llvm/Transforms/Scalar/CallSiteSplitting.h new file mode 100644 index 000000000000..5ab951a49f2c --- /dev/null +++ b/include/llvm/Transforms/Scalar/CallSiteSplitting.h @@ -0,0 +1,29 @@ +//===- CallSiteSplitting..h - Callsite Splitting ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H +#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H + +#include "llvm/ADT/SetVector.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/PassManager.h" +#include "llvm/Support/Compiler.h" +#include + +namespace llvm { + +struct CallSiteSplittingPass : PassInfoMixin { + /// \brief Run the pass over the function. 
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); +}; +} // end namespace llvm + +#endif // LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H -- cgit v1.2.1 From 7711c315b294abaa47e3933ec470e04fa5b8ae80 Mon Sep 17 00:00:00 2001 From: David Blaikie Date: Fri, 3 Nov 2017 20:57:10 +0000 Subject: GCOV: Move GCOV from IR & Support into ProfileData to fix layering This class was split between libIR and libSupport, which breaks under modular code generation. Move it into the one library that uses it, ProfileData, to resolve this issue. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317366 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/ProfileData/GCOV.h | 460 ++++++++++++++++++++++++++++ include/llvm/ProfileData/SampleProfReader.h | 2 +- include/llvm/Support/GCOV.h | 460 ---------------------------- 3 files changed, 461 insertions(+), 461 deletions(-) create mode 100644 include/llvm/ProfileData/GCOV.h delete mode 100644 include/llvm/Support/GCOV.h (limited to 'include') diff --git a/include/llvm/ProfileData/GCOV.h b/include/llvm/ProfileData/GCOV.h new file mode 100644 index 000000000000..497f80b87b26 --- /dev/null +++ b/include/llvm/ProfileData/GCOV.h @@ -0,0 +1,460 @@ +//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This header provides the interface to read and write coverage files that +// use 'gcov' format. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_PROFILEDATA_GCOV_H +#define LLVM_PROFILEDATA_GCOV_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/iterator_range.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include +#include +#include +#include +#include +#include + +namespace llvm { + +class GCOVFunction; +class GCOVBlock; +class FileInfo; + +namespace GCOV { + +enum GCOVVersion { V402, V404, V704 }; + +/// \brief A struct for passing gcov options between functions. +struct Options { + Options(bool A, bool B, bool C, bool F, bool P, bool U, bool L, bool N) + : AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F), + PreservePaths(P), UncondBranch(U), LongFileNames(L), NoOutput(N) {} + + bool AllBlocks; + bool BranchInfo; + bool BranchCount; + bool FuncCoverage; + bool PreservePaths; + bool UncondBranch; + bool LongFileNames; + bool NoOutput; +}; + +} // end namespace GCOV + +/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific +/// read operations. +class GCOVBuffer { +public: + GCOVBuffer(MemoryBuffer *B) : Buffer(B) {} + + /// readGCNOFormat - Check GCNO signature is valid at the beginning of buffer. + bool readGCNOFormat() { + StringRef File = Buffer->getBuffer().slice(0, 4); + if (File != "oncg") { + errs() << "Unexpected file type: " << File << ".\n"; + return false; + } + Cursor = 4; + return true; + } + + /// readGCDAFormat - Check GCDA signature is valid at the beginning of buffer. 
+ bool readGCDAFormat() { + StringRef File = Buffer->getBuffer().slice(0, 4); + if (File != "adcg") { + errs() << "Unexpected file type: " << File << ".\n"; + return false; + } + Cursor = 4; + return true; + } + + /// readGCOVVersion - Read GCOV version. + bool readGCOVVersion(GCOV::GCOVVersion &Version) { + StringRef VersionStr = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (VersionStr == "*204") { + Cursor += 4; + Version = GCOV::V402; + return true; + } + if (VersionStr == "*404") { + Cursor += 4; + Version = GCOV::V404; + return true; + } + if (VersionStr == "*704") { + Cursor += 4; + Version = GCOV::V704; + return true; + } + errs() << "Unexpected version: " << VersionStr << ".\n"; + return false; + } + + /// readFunctionTag - If cursor points to a function tag then increment the + /// cursor and return true otherwise return false. + bool readFunctionTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || + Tag[3] != '\1') { + return false; + } + Cursor += 4; + return true; + } + + /// readBlockTag - If cursor points to a block tag then increment the + /// cursor and return true otherwise return false. + bool readBlockTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x41' || + Tag[3] != '\x01') { + return false; + } + Cursor += 4; + return true; + } + + /// readEdgeTag - If cursor points to an edge tag then increment the + /// cursor and return true otherwise return false. + bool readEdgeTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x43' || + Tag[3] != '\x01') { + return false; + } + Cursor += 4; + return true; + } + + /// readLineTag - If cursor points to a line tag then increment the + /// cursor and return true otherwise return false. + bool readLineTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x45' || + Tag[3] != '\x01') { + return false; + } + Cursor += 4; + return true; + } + + /// readArcTag - If cursor points to an gcda arc tag then increment the + /// cursor and return true otherwise return false. + bool readArcTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\xa1' || + Tag[3] != '\1') { + return false; + } + Cursor += 4; + return true; + } + + /// readObjectTag - If cursor points to an object summary tag then increment + /// the cursor and return true otherwise return false. + bool readObjectTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || + Tag[3] != '\xa1') { + return false; + } + Cursor += 4; + return true; + } + + /// readProgramTag - If cursor points to a program summary tag then increment + /// the cursor and return true otherwise return false. 
+ bool readProgramTag() { + StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); + if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || + Tag[3] != '\xa3') { + return false; + } + Cursor += 4; + return true; + } + + bool readInt(uint32_t &Val) { + if (Buffer->getBuffer().size() < Cursor + 4) { + errs() << "Unexpected end of memory buffer: " << Cursor + 4 << ".\n"; + return false; + } + StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor + 4); + Cursor += 4; + Val = *(const uint32_t *)(Str.data()); + return true; + } + + bool readInt64(uint64_t &Val) { + uint32_t Lo, Hi; + if (!readInt(Lo) || !readInt(Hi)) + return false; + Val = ((uint64_t)Hi << 32) | Lo; + return true; + } + + bool readString(StringRef &Str) { + uint32_t Len = 0; + // Keep reading until we find a non-zero length. This emulates gcov's + // behaviour, which appears to do the same. + while (Len == 0) + if (!readInt(Len)) + return false; + Len *= 4; + if (Buffer->getBuffer().size() < Cursor + Len) { + errs() << "Unexpected end of memory buffer: " << Cursor + Len << ".\n"; + return false; + } + Str = Buffer->getBuffer().slice(Cursor, Cursor + Len).split('\0').first; + Cursor += Len; + return true; + } + + uint64_t getCursor() const { return Cursor; } + void advanceCursor(uint32_t n) { Cursor += n * 4; } + +private: + MemoryBuffer *Buffer; + uint64_t Cursor = 0; +}; + +/// GCOVFile - Collects coverage information for one pair of coverage file +/// (.gcno and .gcda). +class GCOVFile { +public: + GCOVFile() = default; + + bool readGCNO(GCOVBuffer &Buffer); + bool readGCDA(GCOVBuffer &Buffer); + uint32_t getChecksum() const { return Checksum; } + void print(raw_ostream &OS) const; + void dump() const; + void collectLineCounts(FileInfo &FI); + +private: + bool GCNOInitialized = false; + GCOV::GCOVVersion Version; + uint32_t Checksum = 0; + SmallVector, 16> Functions; + uint32_t RunCount = 0; + uint32_t ProgramCount = 0; +}; + +/// GCOVEdge - Collects edge information. +struct GCOVEdge { + GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D) {} + + GCOVBlock &Src; + GCOVBlock &Dst; + uint64_t Count = 0; +}; + +/// GCOVFunction - Collects function information. +class GCOVFunction { +public: + using BlockIterator = pointee_iterator>::const_iterator>; + + GCOVFunction(GCOVFile &P) : Parent(P) {} + + bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version); + bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version); + StringRef getName() const { return Name; } + StringRef getFilename() const { return Filename; } + size_t getNumBlocks() const { return Blocks.size(); } + uint64_t getEntryCount() const; + uint64_t getExitCount() const; + + BlockIterator block_begin() const { return Blocks.begin(); } + BlockIterator block_end() const { return Blocks.end(); } + iterator_range blocks() const { + return make_range(block_begin(), block_end()); + } + + void print(raw_ostream &OS) const; + void dump() const; + void collectLineCounts(FileInfo &FI); + +private: + GCOVFile &Parent; + uint32_t Ident = 0; + uint32_t Checksum; + uint32_t LineNumber = 0; + StringRef Name; + StringRef Filename; + SmallVector, 16> Blocks; + SmallVector, 16> Edges; +}; + +/// GCOVBlock - Collects block information. 
+class GCOVBlock { + struct EdgeWeight { + EdgeWeight(GCOVBlock *D) : Dst(D) {} + + GCOVBlock *Dst; + uint64_t Count = 0; + }; + + struct SortDstEdgesFunctor { + bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) { + return E1->Dst.Number < E2->Dst.Number; + } + }; + +public: + using EdgeIterator = SmallVectorImpl::const_iterator; + + GCOVBlock(GCOVFunction &P, uint32_t N) : Parent(P), Number(N) {} + ~GCOVBlock(); + + const GCOVFunction &getParent() const { return Parent; } + void addLine(uint32_t N) { Lines.push_back(N); } + uint32_t getLastLine() const { return Lines.back(); } + void addCount(size_t DstEdgeNo, uint64_t N); + uint64_t getCount() const { return Counter; } + + void addSrcEdge(GCOVEdge *Edge) { + assert(&Edge->Dst == this); // up to caller to ensure edge is valid + SrcEdges.push_back(Edge); + } + + void addDstEdge(GCOVEdge *Edge) { + assert(&Edge->Src == this); // up to caller to ensure edge is valid + // Check if adding this edge causes list to become unsorted. + if (DstEdges.size() && DstEdges.back()->Dst.Number > Edge->Dst.Number) + DstEdgesAreSorted = false; + DstEdges.push_back(Edge); + } + + size_t getNumSrcEdges() const { return SrcEdges.size(); } + size_t getNumDstEdges() const { return DstEdges.size(); } + void sortDstEdges(); + + EdgeIterator src_begin() const { return SrcEdges.begin(); } + EdgeIterator src_end() const { return SrcEdges.end(); } + iterator_range srcs() const { + return make_range(src_begin(), src_end()); + } + + EdgeIterator dst_begin() const { return DstEdges.begin(); } + EdgeIterator dst_end() const { return DstEdges.end(); } + iterator_range dsts() const { + return make_range(dst_begin(), dst_end()); + } + + void print(raw_ostream &OS) const; + void dump() const; + void collectLineCounts(FileInfo &FI); + +private: + GCOVFunction &Parent; + uint32_t Number; + uint64_t Counter = 0; + bool DstEdgesAreSorted = true; + SmallVector SrcEdges; + SmallVector DstEdges; + SmallVector Lines; +}; + +class FileInfo { + // It is unlikely--but possible--for multiple functions to be on the same + // line. + // Therefore this typedef allows LineData.Functions to store multiple + // functions + // per instance. This is rare, however, so optimize for the common case. 
+ using FunctionVector = SmallVector; + using FunctionLines = DenseMap; + using BlockVector = SmallVector; + using BlockLines = DenseMap; + + struct LineData { + LineData() = default; + + BlockLines Blocks; + FunctionLines Functions; + uint32_t LastLine = 0; + }; + + struct GCOVCoverage { + GCOVCoverage(StringRef Name) : Name(Name) {} + + StringRef Name; + + uint32_t LogicalLines = 0; + uint32_t LinesExec = 0; + + uint32_t Branches = 0; + uint32_t BranchesExec = 0; + uint32_t BranchesTaken = 0; + }; + +public: + FileInfo(const GCOV::Options &Options) : Options(Options) {} + + void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) { + if (Line > LineInfo[Filename].LastLine) + LineInfo[Filename].LastLine = Line; + LineInfo[Filename].Blocks[Line - 1].push_back(Block); + } + + void addFunctionLine(StringRef Filename, uint32_t Line, + const GCOVFunction *Function) { + if (Line > LineInfo[Filename].LastLine) + LineInfo[Filename].LastLine = Line; + LineInfo[Filename].Functions[Line - 1].push_back(Function); + } + + void setRunCount(uint32_t Runs) { RunCount = Runs; } + void setProgramCount(uint32_t Programs) { ProgramCount = Programs; } + void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile, + StringRef GCDAFile); + +private: + std::string getCoveragePath(StringRef Filename, StringRef MainFilename); + std::unique_ptr openCoveragePath(StringRef CoveragePath); + void printFunctionSummary(raw_ostream &OS, const FunctionVector &Funcs) const; + void printBlockInfo(raw_ostream &OS, const GCOVBlock &Block, + uint32_t LineIndex, uint32_t &BlockNo) const; + void printBranchInfo(raw_ostream &OS, const GCOVBlock &Block, + GCOVCoverage &Coverage, uint32_t &EdgeNo); + void printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo, + uint64_t Count) const; + + void printCoverage(raw_ostream &OS, const GCOVCoverage &Coverage) const; + void printFuncCoverage(raw_ostream &OS) const; + void printFileCoverage(raw_ostream &OS) const; + + const GCOV::Options &Options; + StringMap LineInfo; + uint32_t RunCount = 0; + uint32_t ProgramCount = 0; + + using FileCoverageList = SmallVector, 4>; + using FuncCoverageMap = MapVector; + + FileCoverageList FileCoverages; + FuncCoverageMap FuncCoverages; +}; + +} // end namespace llvm + +#endif // LLVM_SUPPORT_GCOV_H diff --git a/include/llvm/ProfileData/SampleProfReader.h b/include/llvm/ProfileData/SampleProfReader.h index 9c1f357cbbd1..0e9ab2dc60ee 100644 --- a/include/llvm/ProfileData/SampleProfReader.h +++ b/include/llvm/ProfileData/SampleProfReader.h @@ -217,10 +217,10 @@ #include "llvm/IR/Function.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/ProfileSummary.h" +#include "llvm/ProfileData/GCOV.h" #include "llvm/ProfileData/SampleProf.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorOr.h" -#include "llvm/Support/GCOV.h" #include "llvm/Support/MemoryBuffer.h" #include #include diff --git a/include/llvm/Support/GCOV.h b/include/llvm/Support/GCOV.h deleted file mode 100644 index 02016e7dbd62..000000000000 --- a/include/llvm/Support/GCOV.h +++ /dev/null @@ -1,460 +0,0 @@ -//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This header provides the interface to read and write coverage files that -// use 'gcov' format. 
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_SUPPORT_GCOV_H -#define LLVM_SUPPORT_GCOV_H - -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/MapVector.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/StringMap.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/ADT/iterator.h" -#include "llvm/ADT/iterator_range.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/raw_ostream.h" -#include -#include -#include -#include -#include -#include - -namespace llvm { - -class GCOVFunction; -class GCOVBlock; -class FileInfo; - -namespace GCOV { - -enum GCOVVersion { V402, V404, V704 }; - -/// \brief A struct for passing gcov options between functions. -struct Options { - Options(bool A, bool B, bool C, bool F, bool P, bool U, bool L, bool N) - : AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F), - PreservePaths(P), UncondBranch(U), LongFileNames(L), NoOutput(N) {} - - bool AllBlocks; - bool BranchInfo; - bool BranchCount; - bool FuncCoverage; - bool PreservePaths; - bool UncondBranch; - bool LongFileNames; - bool NoOutput; -}; - -} // end namespace GCOV - -/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific -/// read operations. -class GCOVBuffer { -public: - GCOVBuffer(MemoryBuffer *B) : Buffer(B) {} - - /// readGCNOFormat - Check GCNO signature is valid at the beginning of buffer. - bool readGCNOFormat() { - StringRef File = Buffer->getBuffer().slice(0, 4); - if (File != "oncg") { - errs() << "Unexpected file type: " << File << ".\n"; - return false; - } - Cursor = 4; - return true; - } - - /// readGCDAFormat - Check GCDA signature is valid at the beginning of buffer. - bool readGCDAFormat() { - StringRef File = Buffer->getBuffer().slice(0, 4); - if (File != "adcg") { - errs() << "Unexpected file type: " << File << ".\n"; - return false; - } - Cursor = 4; - return true; - } - - /// readGCOVVersion - Read GCOV version. - bool readGCOVVersion(GCOV::GCOVVersion &Version) { - StringRef VersionStr = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (VersionStr == "*204") { - Cursor += 4; - Version = GCOV::V402; - return true; - } - if (VersionStr == "*404") { - Cursor += 4; - Version = GCOV::V404; - return true; - } - if (VersionStr == "*704") { - Cursor += 4; - Version = GCOV::V704; - return true; - } - errs() << "Unexpected version: " << VersionStr << ".\n"; - return false; - } - - /// readFunctionTag - If cursor points to a function tag then increment the - /// cursor and return true otherwise return false. - bool readFunctionTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || - Tag[3] != '\1') { - return false; - } - Cursor += 4; - return true; - } - - /// readBlockTag - If cursor points to a block tag then increment the - /// cursor and return true otherwise return false. - bool readBlockTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x41' || - Tag[3] != '\x01') { - return false; - } - Cursor += 4; - return true; - } - - /// readEdgeTag - If cursor points to an edge tag then increment the - /// cursor and return true otherwise return false. 
- bool readEdgeTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x43' || - Tag[3] != '\x01') { - return false; - } - Cursor += 4; - return true; - } - - /// readLineTag - If cursor points to a line tag then increment the - /// cursor and return true otherwise return false. - bool readLineTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x45' || - Tag[3] != '\x01') { - return false; - } - Cursor += 4; - return true; - } - - /// readArcTag - If cursor points to an gcda arc tag then increment the - /// cursor and return true otherwise return false. - bool readArcTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\xa1' || - Tag[3] != '\1') { - return false; - } - Cursor += 4; - return true; - } - - /// readObjectTag - If cursor points to an object summary tag then increment - /// the cursor and return true otherwise return false. - bool readObjectTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || - Tag[3] != '\xa1') { - return false; - } - Cursor += 4; - return true; - } - - /// readProgramTag - If cursor points to a program summary tag then increment - /// the cursor and return true otherwise return false. - bool readProgramTag() { - StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); - if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || - Tag[3] != '\xa3') { - return false; - } - Cursor += 4; - return true; - } - - bool readInt(uint32_t &Val) { - if (Buffer->getBuffer().size() < Cursor + 4) { - errs() << "Unexpected end of memory buffer: " << Cursor + 4 << ".\n"; - return false; - } - StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor + 4); - Cursor += 4; - Val = *(const uint32_t *)(Str.data()); - return true; - } - - bool readInt64(uint64_t &Val) { - uint32_t Lo, Hi; - if (!readInt(Lo) || !readInt(Hi)) - return false; - Val = ((uint64_t)Hi << 32) | Lo; - return true; - } - - bool readString(StringRef &Str) { - uint32_t Len = 0; - // Keep reading until we find a non-zero length. This emulates gcov's - // behaviour, which appears to do the same. - while (Len == 0) - if (!readInt(Len)) - return false; - Len *= 4; - if (Buffer->getBuffer().size() < Cursor + Len) { - errs() << "Unexpected end of memory buffer: " << Cursor + Len << ".\n"; - return false; - } - Str = Buffer->getBuffer().slice(Cursor, Cursor + Len).split('\0').first; - Cursor += Len; - return true; - } - - uint64_t getCursor() const { return Cursor; } - void advanceCursor(uint32_t n) { Cursor += n * 4; } - -private: - MemoryBuffer *Buffer; - uint64_t Cursor = 0; -}; - -/// GCOVFile - Collects coverage information for one pair of coverage file -/// (.gcno and .gcda). -class GCOVFile { -public: - GCOVFile() = default; - - bool readGCNO(GCOVBuffer &Buffer); - bool readGCDA(GCOVBuffer &Buffer); - uint32_t getChecksum() const { return Checksum; } - void print(raw_ostream &OS) const; - void dump() const; - void collectLineCounts(FileInfo &FI); - -private: - bool GCNOInitialized = false; - GCOV::GCOVVersion Version; - uint32_t Checksum = 0; - SmallVector, 16> Functions; - uint32_t RunCount = 0; - uint32_t ProgramCount = 0; -}; - -/// GCOVEdge - Collects edge information. 
-struct GCOVEdge { - GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D) {} - - GCOVBlock &Src; - GCOVBlock &Dst; - uint64_t Count = 0; -}; - -/// GCOVFunction - Collects function information. -class GCOVFunction { -public: - using BlockIterator = pointee_iterator>::const_iterator>; - - GCOVFunction(GCOVFile &P) : Parent(P) {} - - bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version); - bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version); - StringRef getName() const { return Name; } - StringRef getFilename() const { return Filename; } - size_t getNumBlocks() const { return Blocks.size(); } - uint64_t getEntryCount() const; - uint64_t getExitCount() const; - - BlockIterator block_begin() const { return Blocks.begin(); } - BlockIterator block_end() const { return Blocks.end(); } - iterator_range blocks() const { - return make_range(block_begin(), block_end()); - } - - void print(raw_ostream &OS) const; - void dump() const; - void collectLineCounts(FileInfo &FI); - -private: - GCOVFile &Parent; - uint32_t Ident = 0; - uint32_t Checksum; - uint32_t LineNumber = 0; - StringRef Name; - StringRef Filename; - SmallVector, 16> Blocks; - SmallVector, 16> Edges; -}; - -/// GCOVBlock - Collects block information. -class GCOVBlock { - struct EdgeWeight { - EdgeWeight(GCOVBlock *D) : Dst(D) {} - - GCOVBlock *Dst; - uint64_t Count = 0; - }; - - struct SortDstEdgesFunctor { - bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) { - return E1->Dst.Number < E2->Dst.Number; - } - }; - -public: - using EdgeIterator = SmallVectorImpl::const_iterator; - - GCOVBlock(GCOVFunction &P, uint32_t N) : Parent(P), Number(N) {} - ~GCOVBlock(); - - const GCOVFunction &getParent() const { return Parent; } - void addLine(uint32_t N) { Lines.push_back(N); } - uint32_t getLastLine() const { return Lines.back(); } - void addCount(size_t DstEdgeNo, uint64_t N); - uint64_t getCount() const { return Counter; } - - void addSrcEdge(GCOVEdge *Edge) { - assert(&Edge->Dst == this); // up to caller to ensure edge is valid - SrcEdges.push_back(Edge); - } - - void addDstEdge(GCOVEdge *Edge) { - assert(&Edge->Src == this); // up to caller to ensure edge is valid - // Check if adding this edge causes list to become unsorted. - if (DstEdges.size() && DstEdges.back()->Dst.Number > Edge->Dst.Number) - DstEdgesAreSorted = false; - DstEdges.push_back(Edge); - } - - size_t getNumSrcEdges() const { return SrcEdges.size(); } - size_t getNumDstEdges() const { return DstEdges.size(); } - void sortDstEdges(); - - EdgeIterator src_begin() const { return SrcEdges.begin(); } - EdgeIterator src_end() const { return SrcEdges.end(); } - iterator_range srcs() const { - return make_range(src_begin(), src_end()); - } - - EdgeIterator dst_begin() const { return DstEdges.begin(); } - EdgeIterator dst_end() const { return DstEdges.end(); } - iterator_range dsts() const { - return make_range(dst_begin(), dst_end()); - } - - void print(raw_ostream &OS) const; - void dump() const; - void collectLineCounts(FileInfo &FI); - -private: - GCOVFunction &Parent; - uint32_t Number; - uint64_t Counter = 0; - bool DstEdgesAreSorted = true; - SmallVector SrcEdges; - SmallVector DstEdges; - SmallVector Lines; -}; - -class FileInfo { - // It is unlikely--but possible--for multiple functions to be on the same - // line. - // Therefore this typedef allows LineData.Functions to store multiple - // functions - // per instance. This is rare, however, so optimize for the common case. 
- using FunctionVector = SmallVector; - using FunctionLines = DenseMap; - using BlockVector = SmallVector; - using BlockLines = DenseMap; - - struct LineData { - LineData() = default; - - BlockLines Blocks; - FunctionLines Functions; - uint32_t LastLine = 0; - }; - - struct GCOVCoverage { - GCOVCoverage(StringRef Name) : Name(Name) {} - - StringRef Name; - - uint32_t LogicalLines = 0; - uint32_t LinesExec = 0; - - uint32_t Branches = 0; - uint32_t BranchesExec = 0; - uint32_t BranchesTaken = 0; - }; - -public: - FileInfo(const GCOV::Options &Options) : Options(Options) {} - - void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) { - if (Line > LineInfo[Filename].LastLine) - LineInfo[Filename].LastLine = Line; - LineInfo[Filename].Blocks[Line - 1].push_back(Block); - } - - void addFunctionLine(StringRef Filename, uint32_t Line, - const GCOVFunction *Function) { - if (Line > LineInfo[Filename].LastLine) - LineInfo[Filename].LastLine = Line; - LineInfo[Filename].Functions[Line - 1].push_back(Function); - } - - void setRunCount(uint32_t Runs) { RunCount = Runs; } - void setProgramCount(uint32_t Programs) { ProgramCount = Programs; } - void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile, - StringRef GCDAFile); - -private: - std::string getCoveragePath(StringRef Filename, StringRef MainFilename); - std::unique_ptr openCoveragePath(StringRef CoveragePath); - void printFunctionSummary(raw_ostream &OS, const FunctionVector &Funcs) const; - void printBlockInfo(raw_ostream &OS, const GCOVBlock &Block, - uint32_t LineIndex, uint32_t &BlockNo) const; - void printBranchInfo(raw_ostream &OS, const GCOVBlock &Block, - GCOVCoverage &Coverage, uint32_t &EdgeNo); - void printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo, - uint64_t Count) const; - - void printCoverage(raw_ostream &OS, const GCOVCoverage &Coverage) const; - void printFuncCoverage(raw_ostream &OS) const; - void printFileCoverage(raw_ostream &OS) const; - - const GCOV::Options &Options; - StringMap LineInfo; - uint32_t RunCount = 0; - uint32_t ProgramCount = 0; - - using FileCoverageList = SmallVector, 4>; - using FuncCoverageMap = MapVector; - - FileCoverageList FileCoverages; - FuncCoverageMap FuncCoverages; -}; - -} // end namespace llvm - -#endif // LLVM_SUPPORT_GCOV_H -- cgit v1.2.1 From 9cf32a0f1d22c5ece8a581e98166ddac8a6e61a7 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Fri, 3 Nov 2017 21:30:06 +0000 Subject: Revert r317046, "Object: Move some code from ELF.h into ELF.cpp." This change resulted in a measured 1.5-2% perf regression linking chrome. 
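A plausible mechanism for the slowdown, sketched here with hypothetical names
rather than the actual ELF.h/ELF.cpp contents: while these ELFFile member
templates were defined in the header, every caller could inline them; moving
the definitions into ELF.cpp, typically behind explicit instantiations, turns
the same accessors into out-of-line calls on hot paths of the linker.

  // util.h (hypothetical): declaration only, once the body moves out of line.
  template <class T> T twice(T X);

  // util.cpp (hypothetical): single definition plus an explicit instantiation;
  // callers in other translation units now pay a real function call.
  template <class T> T twice(T X) { return X + X; }
  template int twice<int>(int);

Keeping the definition in the header instead leaves it visible, and inlinable,
at every call site, which is the state this revert restores for ELF.h.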
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317371 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Object/ELF.h | 263 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 263 insertions(+) (limited to 'include') diff --git a/include/llvm/Object/ELF.h b/include/llvm/Object/ELF.h index 92fb46e8e935..c24b6310465e 100644 --- a/include/llvm/Object/ELF.h +++ b/include/llvm/Object/ELF.h @@ -204,6 +204,46 @@ getExtendedSymbolTableIndex(const typename ELFT::Sym *Sym, return ShndxTable[Index]; } +template +Expected +ELFFile::getSectionIndex(const Elf_Sym *Sym, Elf_Sym_Range Syms, + ArrayRef ShndxTable) const { + uint32_t Index = Sym->st_shndx; + if (Index == ELF::SHN_XINDEX) { + auto ErrorOrIndex = getExtendedSymbolTableIndex( + Sym, Syms.begin(), ShndxTable); + if (!ErrorOrIndex) + return ErrorOrIndex.takeError(); + return *ErrorOrIndex; + } + if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE) + return 0; + return Index; +} + +template +Expected +ELFFile::getSection(const Elf_Sym *Sym, const Elf_Shdr *SymTab, + ArrayRef ShndxTable) const { + auto SymsOrErr = symbols(SymTab); + if (!SymsOrErr) + return SymsOrErr.takeError(); + return getSection(Sym, *SymsOrErr, ShndxTable); +} + +template +Expected +ELFFile::getSection(const Elf_Sym *Sym, Elf_Sym_Range Symbols, + ArrayRef ShndxTable) const { + auto IndexOrErr = getSectionIndex(Sym, Symbols, ShndxTable); + if (!IndexOrErr) + return IndexOrErr.takeError(); + uint32_t Index = *IndexOrErr; + if (Index == 0) + return nullptr; + return getSection(Index); +} + template inline Expected getSymbol(typename ELFT::SymRange Symbols, uint32_t Index) { @@ -212,6 +252,15 @@ getSymbol(typename ELFT::SymRange Symbols, uint32_t Index) { return &Symbols[Index]; } +template +Expected +ELFFile::getSymbol(const Elf_Shdr *Sec, uint32_t Index) const { + auto SymtabOrErr = symbols(Sec); + if (!SymtabOrErr) + return SymtabOrErr.takeError(); + return object::getSymbol(*SymtabOrErr, Index); +} + template template Expected> @@ -232,6 +281,119 @@ ELFFile::getSectionContentsAsArray(const Elf_Shdr *Sec) const { return makeArrayRef(Start, Size / sizeof(T)); } +template +Expected> +ELFFile::getSectionContents(const Elf_Shdr *Sec) const { + return getSectionContentsAsArray(Sec); +} + +template +StringRef ELFFile::getRelocationTypeName(uint32_t Type) const { + return getELFRelocationTypeName(getHeader()->e_machine, Type); +} + +template +void ELFFile::getRelocationTypeName(uint32_t Type, + SmallVectorImpl &Result) const { + if (!isMipsELF64()) { + StringRef Name = getRelocationTypeName(Type); + Result.append(Name.begin(), Name.end()); + } else { + // The Mips N64 ABI allows up to three operations to be specified per + // relocation record. Unfortunately there's no easy way to test for the + // presence of N64 ELFs as they have no special flag that identifies them + // as being N64. We can safely assume at the moment that all Mips + // ELFCLASS64 ELFs are N64. New Mips64 ABIs should provide enough + // information to disambiguate between old vs new ABIs. + uint8_t Type1 = (Type >> 0) & 0xFF; + uint8_t Type2 = (Type >> 8) & 0xFF; + uint8_t Type3 = (Type >> 16) & 0xFF; + + // Concat all three relocation type names. 
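// A concrete illustration of the N64 packing decoded above; the example value
// is assumed for illustration, not taken from the patch. An r_type of
// 0x00052407 splits into Type1 = 0x07, Type2 = 0x24 and Type3 = 0x05, which
// would print as R_MIPS_GPREL16/R_MIPS_SUB/R_MIPS_HI16 once the three names
// are concatenated.

#include <cstdint>

static void splitMipsN64RelType(uint32_t Type, uint8_t &T1, uint8_t &T2,
                                uint8_t &T3) {
  T1 = (Type >> 0) & 0xFF;  // first operation
  T2 = (Type >> 8) & 0xFF;  // second operation
  T3 = (Type >> 16) & 0xFF; // third operation
}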
+ StringRef Name = getRelocationTypeName(Type1); + Result.append(Name.begin(), Name.end()); + + Name = getRelocationTypeName(Type2); + Result.append(1, '/'); + Result.append(Name.begin(), Name.end()); + + Name = getRelocationTypeName(Type3); + Result.append(1, '/'); + Result.append(Name.begin(), Name.end()); + } +} + +template +Expected +ELFFile::getRelocationSymbol(const Elf_Rel *Rel, + const Elf_Shdr *SymTab) const { + uint32_t Index = Rel->getSymbol(isMips64EL()); + if (Index == 0) + return nullptr; + return getEntry(SymTab, Index); +} + +template +Expected +ELFFile::getSectionStringTable(Elf_Shdr_Range Sections) const { + uint32_t Index = getHeader()->e_shstrndx; + if (Index == ELF::SHN_XINDEX) + Index = Sections[0].sh_link; + + if (!Index) // no section string table. + return ""; + if (Index >= Sections.size()) + return createError("invalid section index"); + return getStringTable(&Sections[Index]); +} + +template ELFFile::ELFFile(StringRef Object) : Buf(Object) {} + +template +Expected> ELFFile::create(StringRef Object) { + if (sizeof(Elf_Ehdr) > Object.size()) + return createError("Invalid buffer"); + return ELFFile(Object); +} + +template +Expected ELFFile::sections() const { + const uintX_t SectionTableOffset = getHeader()->e_shoff; + if (SectionTableOffset == 0) + return ArrayRef(); + + if (getHeader()->e_shentsize != sizeof(Elf_Shdr)) + return createError( + "invalid section header entry size (e_shentsize) in ELF header"); + + const uint64_t FileSize = Buf.size(); + + if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize) + return createError("section header table goes past the end of the file"); + + // Invalid address alignment of section headers + if (SectionTableOffset & (alignof(Elf_Shdr) - 1)) + return createError("invalid alignment of section headers"); + + const Elf_Shdr *First = + reinterpret_cast(base() + SectionTableOffset); + + uintX_t NumSections = getHeader()->e_shnum; + if (NumSections == 0) + NumSections = First->sh_size; + + if (NumSections > UINT64_MAX / sizeof(Elf_Shdr)) + return createError("section table goes past the end of file"); + + const uint64_t SectionTableSize = NumSections * sizeof(Elf_Shdr); + + // Section table goes past end of file! 
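// Sketch of the extended-numbering convention that getSectionStringTable()
// and sections() above are handling, with simplified field types and
// illustrative names: when the real values do not fit in the ELF header,
// e_shnum is written as 0 and e_shstrndx as SHN_XINDEX, and the true section
// count and string-table index are stashed in section header 0.

#include <cstdint>

struct MiniEhdr { uint16_t e_shnum, e_shstrndx; };
struct MiniShdr { uint64_t sh_size; uint32_t sh_link; };
static const uint16_t kShnXindex = 0xffff; // ELF::SHN_XINDEX

static uint64_t realSectionCount(const MiniEhdr &E, const MiniShdr &Shdr0) {
  return E.e_shnum ? E.e_shnum : Shdr0.sh_size;
}
static uint32_t realShstrtabIndex(const MiniEhdr &E, const MiniShdr &Shdr0) {
  return E.e_shstrndx == kShnXindex ? Shdr0.sh_link : E.e_shstrndx;
}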
+ if (SectionTableOffset + SectionTableSize > FileSize) + return createError("section table goes past the end of file"); + + return makeArrayRef(First, NumSections); +} + template template Expected ELFFile::getEntry(uint32_t Section, @@ -254,6 +416,107 @@ Expected ELFFile::getEntry(const Elf_Shdr *Section, return reinterpret_cast(base() + Pos); } +template +Expected +ELFFile::getSection(uint32_t Index) const { + auto TableOrErr = sections(); + if (!TableOrErr) + return TableOrErr.takeError(); + return object::getSection(*TableOrErr, Index); +} + +template +Expected +ELFFile::getStringTable(const Elf_Shdr *Section) const { + if (Section->sh_type != ELF::SHT_STRTAB) + return createError("invalid sh_type for string table, expected SHT_STRTAB"); + auto V = getSectionContentsAsArray(Section); + if (!V) + return V.takeError(); + ArrayRef Data = *V; + if (Data.empty()) + return createError("empty string table"); + if (Data.back() != '\0') + return createError("string table non-null terminated"); + return StringRef(Data.begin(), Data.size()); +} + +template +Expected> +ELFFile::getSHNDXTable(const Elf_Shdr &Section) const { + auto SectionsOrErr = sections(); + if (!SectionsOrErr) + return SectionsOrErr.takeError(); + return getSHNDXTable(Section, *SectionsOrErr); +} + +template +Expected> +ELFFile::getSHNDXTable(const Elf_Shdr &Section, + Elf_Shdr_Range Sections) const { + assert(Section.sh_type == ELF::SHT_SYMTAB_SHNDX); + auto VOrErr = getSectionContentsAsArray(&Section); + if (!VOrErr) + return VOrErr.takeError(); + ArrayRef V = *VOrErr; + auto SymTableOrErr = object::getSection(Sections, Section.sh_link); + if (!SymTableOrErr) + return SymTableOrErr.takeError(); + const Elf_Shdr &SymTable = **SymTableOrErr; + if (SymTable.sh_type != ELF::SHT_SYMTAB && + SymTable.sh_type != ELF::SHT_DYNSYM) + return createError("invalid sh_type"); + if (V.size() != (SymTable.sh_size / sizeof(Elf_Sym))) + return createError("invalid section contents size"); + return V; +} + +template +Expected +ELFFile::getStringTableForSymtab(const Elf_Shdr &Sec) const { + auto SectionsOrErr = sections(); + if (!SectionsOrErr) + return SectionsOrErr.takeError(); + return getStringTableForSymtab(Sec, *SectionsOrErr); +} + +template +Expected +ELFFile::getStringTableForSymtab(const Elf_Shdr &Sec, + Elf_Shdr_Range Sections) const { + + if (Sec.sh_type != ELF::SHT_SYMTAB && Sec.sh_type != ELF::SHT_DYNSYM) + return createError( + "invalid sh_type for symbol table, expected SHT_SYMTAB or SHT_DYNSYM"); + auto SectionOrErr = object::getSection(Sections, Sec.sh_link); + if (!SectionOrErr) + return SectionOrErr.takeError(); + return getStringTable(*SectionOrErr); +} + +template +Expected +ELFFile::getSectionName(const Elf_Shdr *Section) const { + auto SectionsOrErr = sections(); + if (!SectionsOrErr) + return SectionsOrErr.takeError(); + auto Table = getSectionStringTable(*SectionsOrErr); + if (!Table) + return Table.takeError(); + return getSectionName(Section, *Table); +} + +template +Expected ELFFile::getSectionName(const Elf_Shdr *Section, + StringRef DotShstrtab) const { + uint32_t Offset = Section->sh_name; + if (Offset == 0) + return StringRef(); + if (Offset >= DotShstrtab.size()) + return createError("invalid string offset"); + return StringRef(DotShstrtab.data() + Offset); +} + /// This function returns the hash value for a symbol in the .dynsym section /// Name of the API remains consistent as specified in the libelf /// REF : http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash -- cgit v1.2.1 From 
cdc57825ed64b0995a34dcbf9f330e8b2d5cd5bd Mon Sep 17 00:00:00 2001
From: Sean Fertile
Date: Fri, 3 Nov 2017 21:45:55 +0000
Subject: [LTO][ThinLTO] Use the linker resolutions to mark global values as dso_local.

Now that we have a way to mark GlobalValues as local we can use the symbol
resolutions that the linker plugin provides as part of the LTO/ThinLTO link
step to refine the compiler's view on what symbols will end up being local.

Differential Revision: https://reviews.llvm.org/D35702

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317374 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/IR/ModuleSummaryIndex.h     | 12 ++++++++++--
 include/llvm/IR/ModuleSummaryIndexYAML.h |  8 +++++---
 2 files changed, 15 insertions(+), 5 deletions(-)
(limited to 'include')

diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index 2d664f41e3ce..b1e58a2a0d9b 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -148,11 +148,15 @@ public:
     /// In combined summary, indicate that the global value is live.
     unsigned Live : 1;
 
+    /// Indicates that the linker resolved the symbol to a definition from
+    /// within the same linkage unit.
+    unsigned DSOLocal : 1;
+
     /// Convenience Constructors
     explicit GVFlags(GlobalValue::LinkageTypes Linkage,
-                     bool NotEligibleToImport, bool Live)
+                     bool NotEligibleToImport, bool Live, bool IsLocal)
         : Linkage(Linkage), NotEligibleToImport(NotEligibleToImport),
-          Live(Live) {}
+          Live(Live), DSOLocal(IsLocal) {}
   };
 
 private:
@@ -229,6 +233,10 @@ public:
 
   void setLive(bool Live) { Flags.Live = Live; }
 
+  void setDSOLocal(bool Local) { Flags.DSOLocal = Local; }
+
+  bool isDSOLocal() const { return Flags.DSOLocal; }
+
   /// Flag that this global value cannot be imported.
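// Sketch, not part of the patch, of how the ThinLTO backend might consume the
// new bit: once the linker reports that a symbol resolves within this linkage
// unit, the IR definition can be marked dso_local and referenced without
// GOT/PLT indirection. The function name below is illustrative;
// GlobalValueSummary::isDSOLocal() and GlobalValue::setDSOLocal() are the
// interfaces this change adds or relies on.

#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/ModuleSummaryIndex.h"

static void applyLinkerResolution(llvm::GlobalValue &GV,
                                  const llvm::GlobalValueSummary &Summary) {
  if (Summary.isDSOLocal())
    GV.setDSOLocal(true);
}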
void setNotEligibleToImport() { Flags.NotEligibleToImport = true; } diff --git a/include/llvm/IR/ModuleSummaryIndexYAML.h b/include/llvm/IR/ModuleSummaryIndexYAML.h index 2f9990ca03d8..4687f2d53e7e 100644 --- a/include/llvm/IR/ModuleSummaryIndexYAML.h +++ b/include/llvm/IR/ModuleSummaryIndexYAML.h @@ -135,7 +135,7 @@ template <> struct MappingTraits { struct FunctionSummaryYaml { unsigned Linkage; - bool NotEligibleToImport, Live; + bool NotEligibleToImport, Live, IsLocal; std::vector TypeTests; std::vector TypeTestAssumeVCalls, TypeCheckedLoadVCalls; @@ -177,6 +177,7 @@ template <> struct MappingTraits { io.mapOptional("Linkage", summary.Linkage); io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport); io.mapOptional("Live", summary.Live); + io.mapOptional("Local", summary.IsLocal); io.mapOptional("TypeTests", summary.TypeTests); io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls); io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls); @@ -211,7 +212,7 @@ template <> struct CustomMappingTraits { Elem.SummaryList.push_back(llvm::make_unique( GlobalValueSummary::GVFlags( static_cast(FSum.Linkage), - FSum.NotEligibleToImport, FSum.Live), + FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal), 0, FunctionSummary::FFlags{}, ArrayRef{}, ArrayRef{}, std::move(FSum.TypeTests), std::move(FSum.TypeTestAssumeVCalls), @@ -228,7 +229,8 @@ template <> struct CustomMappingTraits { FSums.push_back(FunctionSummaryYaml{ FSum->flags().Linkage, static_cast(FSum->flags().NotEligibleToImport), - static_cast(FSum->flags().Live), FSum->type_tests(), + static_cast(FSum->flags().Live), + static_cast(FSum->flags().DSOLocal), FSum->type_tests(), FSum->type_test_assume_vcalls(), FSum->type_checked_load_vcalls(), FSum->type_test_assume_const_vcalls(), FSum->type_checked_load_const_vcalls()}); -- cgit v1.2.1 From 803f827385f6dce7f4b44867efdc84b332fd82d2 Mon Sep 17 00:00:00 2001 From: David Blaikie Date: Fri, 3 Nov 2017 22:32:11 +0000 Subject: Move TargetFrameLowering.h to CodeGen where it's implemented This header already includes a CodeGen header and is implemented in lib/CodeGen, so move the header there to match. This fixes a link error with modular codegeneration builds - where a header and its implementation are circularly dependent and so need to be in the same library, not split between two like this. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317379 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/TargetFrameLowering.h | 348 +++++++++++++++++++++++++++++ include/llvm/Target/TargetFrameLowering.h | 348 ----------------------------- 2 files changed, 348 insertions(+), 348 deletions(-) create mode 100644 include/llvm/CodeGen/TargetFrameLowering.h delete mode 100644 include/llvm/Target/TargetFrameLowering.h (limited to 'include') diff --git a/include/llvm/CodeGen/TargetFrameLowering.h b/include/llvm/CodeGen/TargetFrameLowering.h new file mode 100644 index 000000000000..5cf4627f3c96 --- /dev/null +++ b/include/llvm/CodeGen/TargetFrameLowering.h @@ -0,0 +1,348 @@ +//===-- llvm/CodeGen/TargetFrameLowering.h ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Interface to describe the layout of a stack frame on the target machine. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_TARGETFRAMELOWERING_H +#define LLVM_CODEGEN_TARGETFRAMELOWERING_H + +#include "llvm/CodeGen/MachineBasicBlock.h" +#include +#include + +namespace llvm { + class BitVector; + class CalleeSavedInfo; + class MachineFunction; + class RegScavenger; + +/// Information about stack frame layout on the target. It holds the direction +/// of stack growth, the known stack alignment on entry to each function, and +/// the offset to the locals area. +/// +/// The offset to the local area is the offset from the stack pointer on +/// function entry to the first location where function data (local variables, +/// spill locations) can be stored. +class TargetFrameLowering { +public: + enum StackDirection { + StackGrowsUp, // Adding to the stack increases the stack address + StackGrowsDown // Adding to the stack decreases the stack address + }; + + // Maps a callee saved register to a stack slot with a fixed offset. + struct SpillSlot { + unsigned Reg; + int Offset; // Offset relative to stack pointer on function entry. + }; +private: + StackDirection StackDir; + unsigned StackAlignment; + unsigned TransientStackAlignment; + int LocalAreaOffset; + bool StackRealignable; +public: + TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO, + unsigned TransAl = 1, bool StackReal = true) + : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl), + LocalAreaOffset(LAO), StackRealignable(StackReal) {} + + virtual ~TargetFrameLowering(); + + // These methods return information that describes the abstract stack layout + // of the target machine. + + /// getStackGrowthDirection - Return the direction the stack grows + /// + StackDirection getStackGrowthDirection() const { return StackDir; } + + /// getStackAlignment - This method returns the number of bytes to which the + /// stack pointer must be aligned on entry to a function. Typically, this + /// is the largest alignment for any data object in the target. + /// + unsigned getStackAlignment() const { return StackAlignment; } + + /// alignSPAdjust - This method aligns the stack adjustment to the correct + /// alignment. + /// + int alignSPAdjust(int SPAdj) const { + if (SPAdj < 0) { + SPAdj = -alignTo(-SPAdj, StackAlignment); + } else { + SPAdj = alignTo(SPAdj, StackAlignment); + } + return SPAdj; + } + + /// getTransientStackAlignment - This method returns the number of bytes to + /// which the stack pointer must be aligned at all times, even between + /// calls. + /// + unsigned getTransientStackAlignment() const { + return TransientStackAlignment; + } + + /// isStackRealignable - This method returns whether the stack can be + /// realigned. + bool isStackRealignable() const { + return StackRealignable; + } + + /// Return the skew that has to be applied to stack alignment under + /// certain conditions (e.g. stack was adjusted before function \p MF + /// was called). + virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const; + + /// getOffsetOfLocalArea - This method returns the offset of the local area + /// from the stack pointer on entrance to a function. + /// + int getOffsetOfLocalArea() const { return LocalAreaOffset; } + + /// isFPCloseToIncomingSP - Return true if the frame pointer is close to + /// the incoming stack pointer, false if it is close to the post-prologue + /// stack pointer. 
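// Worked example for alignSPAdjust() above, assuming StackAlignment == 16
// purely for illustration: an adjustment of +12 is rounded up to +16 and -12
// to -16, i.e. the magnitude is rounded up to the stack alignment in either
// direction. A standalone sketch of the same arithmetic:

static int alignSPAdjustExample(int SPAdj, unsigned StackAlignment) {
  auto AlignUp = [StackAlignment](unsigned V) {
    return (V + StackAlignment - 1) / StackAlignment * StackAlignment;
  };
  return SPAdj < 0 ? -int(AlignUp(unsigned(-SPAdj)))
                   : int(AlignUp(unsigned(SPAdj)));
}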
+ virtual bool isFPCloseToIncomingSP() const { return true; } + + /// assignCalleeSavedSpillSlots - Allows target to override spill slot + /// assignment logic. If implemented, assignCalleeSavedSpillSlots() should + /// assign frame slots to all CSI entries and return true. If this method + /// returns false, spill slots will be assigned using generic implementation. + /// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of + /// CSI. + virtual bool + assignCalleeSavedSpillSlots(MachineFunction &MF, + const TargetRegisterInfo *TRI, + std::vector &CSI) const { + return false; + } + + /// getCalleeSavedSpillSlots - This method returns a pointer to an array of + /// pairs, that contains an entry for each callee saved register that must be + /// spilled to a particular stack location if it is spilled. + /// + /// Each entry in this array contains a pair, indicating the + /// fixed offset from the incoming stack pointer that each register should be + /// spilled at. If a register is not listed here, the code generator is + /// allowed to spill it anywhere it chooses. + /// + virtual const SpillSlot * + getCalleeSavedSpillSlots(unsigned &NumEntries) const { + NumEntries = 0; + return nullptr; + } + + /// targetHandlesStackFrameRounding - Returns true if the target is + /// responsible for rounding up the stack frame (probably at emitPrologue + /// time). + virtual bool targetHandlesStackFrameRounding() const { + return false; + } + + /// Returns true if the target will correctly handle shrink wrapping. + virtual bool enableShrinkWrapping(const MachineFunction &MF) const { + return false; + } + + /// Returns true if the stack slot holes in the fixed and callee-save stack + /// area should be used when allocating other stack locations to reduce stack + /// size. + virtual bool enableStackSlotScavenging(const MachineFunction &MF) const { + return false; + } + + /// emitProlog/emitEpilog - These methods insert prolog and epilog code into + /// the function. + virtual void emitPrologue(MachineFunction &MF, + MachineBasicBlock &MBB) const = 0; + virtual void emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const = 0; + + /// Replace a StackProbe stub (if any) with the actual probe code inline + virtual void inlineStackProbe(MachineFunction &MF, + MachineBasicBlock &PrologueMBB) const {} + + /// Adjust the prologue to have the function use segmented stacks. This works + /// by adding a check even before the "normal" function prologue. + virtual void adjustForSegmentedStacks(MachineFunction &MF, + MachineBasicBlock &PrologueMBB) const {} + + /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in + /// the assembly prologue to explicitly handle the stack. + virtual void adjustForHiPEPrologue(MachineFunction &MF, + MachineBasicBlock &PrologueMBB) const {} + + /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee + /// saved registers and returns true if it isn't possible / profitable to do + /// so by issuing a series of store instructions via + /// storeRegToStackSlot(). Returns false otherwise. + virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const { + return false; + } + + /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee + /// saved registers and returns true if it isn't possible / profitable to do + /// so by issuing a series of load instructions via loadRegToStackSlot(). 
+ /// If it returns true, and any of the registers in CSI is not restored, + /// it sets the corresponding Restored flag in CSI to false. + /// Returns false otherwise. + virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + std::vector &CSI, + const TargetRegisterInfo *TRI) const { + return false; + } + + /// Return true if the target needs to disable frame pointer elimination. + virtual bool noFramePointerElim(const MachineFunction &MF) const; + + /// hasFP - Return true if the specified function should have a dedicated + /// frame pointer register. For most targets this is true only if the function + /// has variable sized allocas or if frame pointer elimination is disabled. + virtual bool hasFP(const MachineFunction &MF) const = 0; + + /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is + /// not required, we reserve argument space for call sites in the function + /// immediately on entry to the current function. This eliminates the need for + /// add/sub sp brackets around call sites. Returns true if the call frame is + /// included as part of the stack frame. + virtual bool hasReservedCallFrame(const MachineFunction &MF) const { + return !hasFP(MF); + } + + /// canSimplifyCallFramePseudos - When possible, it's best to simplify the + /// call frame pseudo ops before doing frame index elimination. This is + /// possible only when frame index references between the pseudos won't + /// need adjusting for the call frame adjustments. Normally, that's true + /// if the function has a reserved call frame or a frame pointer. Some + /// targets (Thumb2, for example) may have more complicated criteria, + /// however, and can override this behavior. + virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const { + return hasReservedCallFrame(MF) || hasFP(MF); + } + + // needsFrameIndexResolution - Do we need to perform FI resolution for + // this function. Normally, this is required only when the function + // has any stack objects. However, targets may want to override this. + virtual bool needsFrameIndexResolution(const MachineFunction &MF) const; + + /// getFrameIndexReference - This method should return the base register + /// and offset used to reference a frame index location. The offset is + /// returned directly, and the base register is returned via FrameReg. + virtual int getFrameIndexReference(const MachineFunction &MF, int FI, + unsigned &FrameReg) const; + + /// Same as \c getFrameIndexReference, except that the stack pointer (as + /// opposed to the frame pointer) will be the preferred value for \p + /// FrameReg. This is generally used for emitting statepoint or EH tables that + /// use offsets from RSP. If \p IgnoreSPUpdates is true, the returned + /// offset is only guaranteed to be valid with respect to the value of SP at + /// the end of the prologue. + virtual int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, + unsigned &FrameReg, + bool IgnoreSPUpdates) const { + // Always safe to dispatch to getFrameIndexReference. + return getFrameIndexReference(MF, FI, FrameReg); + } + + /// This method determines which of the registers reported by + /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved. + /// The default implementation checks populates the \p SavedRegs bitset with + /// all registers which are modified in the function, targets may override + /// this function to save additional registers. 
+ /// This method also sets up the register scavenger ensuring there is a free + /// register or a frameindex available. + virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, + RegScavenger *RS = nullptr) const; + + /// processFunctionBeforeFrameFinalized - This method is called immediately + /// before the specified function's frame layout (MF.getFrameInfo()) is + /// finalized. Once the frame is finalized, MO_FrameIndex operands are + /// replaced with direct constants. This method is optional. + /// + virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF, + RegScavenger *RS = nullptr) const { + } + + virtual unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const { + report_fatal_error("WinEH not implemented for this target"); + } + + /// This method is called during prolog/epilog code insertion to eliminate + /// call frame setup and destroy pseudo instructions (but only if the Target + /// is using them). It is responsible for eliminating these instructions, + /// replacing them with concrete instructions. This method need only be + /// implemented if using call frame setup/destroy pseudo instructions. + /// Returns an iterator pointing to the instruction after the replaced one. + virtual MachineBasicBlock::iterator + eliminateCallFramePseudoInstr(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const { + llvm_unreachable("Call Frame Pseudo Instructions do not exist on this " + "target!"); + } + + + /// Order the symbols in the local stack frame. + /// The list of objects that we want to order is in \p objectsToAllocate as + /// indices into the MachineFrameInfo. The array can be reordered in any way + /// upon return. The contents of the array, however, may not be modified (i.e. + /// only their order may be changed). + /// By default, just maintain the original order. + virtual void + orderFrameObjects(const MachineFunction &MF, + SmallVectorImpl &objectsToAllocate) const { + } + + /// Check whether or not the given \p MBB can be used as a prologue + /// for the target. + /// The prologue will be inserted first in this basic block. + /// This method is used by the shrink-wrapping pass to decide if + /// \p MBB will be correctly handled by the target. + /// As soon as the target enable shrink-wrapping without overriding + /// this method, we assume that each basic block is a valid + /// prologue. + virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const { + return true; + } + + /// Check whether or not the given \p MBB can be used as a epilogue + /// for the target. + /// The epilogue will be inserted before the first terminator of that block. + /// This method is used by the shrink-wrapping pass to decide if + /// \p MBB will be correctly handled by the target. + /// As soon as the target enable shrink-wrapping without overriding + /// this method, we assume that each basic block is a valid + /// epilogue. + virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const { + return true; + } + + /// Check if given function is safe for not having callee saved registers. + /// This is used when interprocedural register allocation is enabled. + static bool isSafeForNoCSROpt(const Function *F) { + if (!F->hasLocalLinkage() || F->hasAddressTaken() || + !F->hasFnAttribute(Attribute::NoRecurse)) + return false; + // Function should not be optimized as tail call. 
+ for (const User *U : F->users()) + if (auto CS = ImmutableCallSite(U)) + if (CS.isTailCall()) + return false; + return true; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Target/TargetFrameLowering.h b/include/llvm/Target/TargetFrameLowering.h deleted file mode 100644 index 31017cbc27b8..000000000000 --- a/include/llvm/Target/TargetFrameLowering.h +++ /dev/null @@ -1,348 +0,0 @@ -//===-- llvm/Target/TargetFrameLowering.h ---------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Interface to describe the layout of a stack frame on the target machine. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TARGET_TARGETFRAMELOWERING_H -#define LLVM_TARGET_TARGETFRAMELOWERING_H - -#include "llvm/CodeGen/MachineBasicBlock.h" -#include -#include - -namespace llvm { - class BitVector; - class CalleeSavedInfo; - class MachineFunction; - class RegScavenger; - -/// Information about stack frame layout on the target. It holds the direction -/// of stack growth, the known stack alignment on entry to each function, and -/// the offset to the locals area. -/// -/// The offset to the local area is the offset from the stack pointer on -/// function entry to the first location where function data (local variables, -/// spill locations) can be stored. -class TargetFrameLowering { -public: - enum StackDirection { - StackGrowsUp, // Adding to the stack increases the stack address - StackGrowsDown // Adding to the stack decreases the stack address - }; - - // Maps a callee saved register to a stack slot with a fixed offset. - struct SpillSlot { - unsigned Reg; - int Offset; // Offset relative to stack pointer on function entry. - }; -private: - StackDirection StackDir; - unsigned StackAlignment; - unsigned TransientStackAlignment; - int LocalAreaOffset; - bool StackRealignable; -public: - TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO, - unsigned TransAl = 1, bool StackReal = true) - : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl), - LocalAreaOffset(LAO), StackRealignable(StackReal) {} - - virtual ~TargetFrameLowering(); - - // These methods return information that describes the abstract stack layout - // of the target machine. - - /// getStackGrowthDirection - Return the direction the stack grows - /// - StackDirection getStackGrowthDirection() const { return StackDir; } - - /// getStackAlignment - This method returns the number of bytes to which the - /// stack pointer must be aligned on entry to a function. Typically, this - /// is the largest alignment for any data object in the target. - /// - unsigned getStackAlignment() const { return StackAlignment; } - - /// alignSPAdjust - This method aligns the stack adjustment to the correct - /// alignment. - /// - int alignSPAdjust(int SPAdj) const { - if (SPAdj < 0) { - SPAdj = -alignTo(-SPAdj, StackAlignment); - } else { - SPAdj = alignTo(SPAdj, StackAlignment); - } - return SPAdj; - } - - /// getTransientStackAlignment - This method returns the number of bytes to - /// which the stack pointer must be aligned at all times, even between - /// calls. 
- /// - unsigned getTransientStackAlignment() const { - return TransientStackAlignment; - } - - /// isStackRealignable - This method returns whether the stack can be - /// realigned. - bool isStackRealignable() const { - return StackRealignable; - } - - /// Return the skew that has to be applied to stack alignment under - /// certain conditions (e.g. stack was adjusted before function \p MF - /// was called). - virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const; - - /// getOffsetOfLocalArea - This method returns the offset of the local area - /// from the stack pointer on entrance to a function. - /// - int getOffsetOfLocalArea() const { return LocalAreaOffset; } - - /// isFPCloseToIncomingSP - Return true if the frame pointer is close to - /// the incoming stack pointer, false if it is close to the post-prologue - /// stack pointer. - virtual bool isFPCloseToIncomingSP() const { return true; } - - /// assignCalleeSavedSpillSlots - Allows target to override spill slot - /// assignment logic. If implemented, assignCalleeSavedSpillSlots() should - /// assign frame slots to all CSI entries and return true. If this method - /// returns false, spill slots will be assigned using generic implementation. - /// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of - /// CSI. - virtual bool - assignCalleeSavedSpillSlots(MachineFunction &MF, - const TargetRegisterInfo *TRI, - std::vector &CSI) const { - return false; - } - - /// getCalleeSavedSpillSlots - This method returns a pointer to an array of - /// pairs, that contains an entry for each callee saved register that must be - /// spilled to a particular stack location if it is spilled. - /// - /// Each entry in this array contains a pair, indicating the - /// fixed offset from the incoming stack pointer that each register should be - /// spilled at. If a register is not listed here, the code generator is - /// allowed to spill it anywhere it chooses. - /// - virtual const SpillSlot * - getCalleeSavedSpillSlots(unsigned &NumEntries) const { - NumEntries = 0; - return nullptr; - } - - /// targetHandlesStackFrameRounding - Returns true if the target is - /// responsible for rounding up the stack frame (probably at emitPrologue - /// time). - virtual bool targetHandlesStackFrameRounding() const { - return false; - } - - /// Returns true if the target will correctly handle shrink wrapping. - virtual bool enableShrinkWrapping(const MachineFunction &MF) const { - return false; - } - - /// Returns true if the stack slot holes in the fixed and callee-save stack - /// area should be used when allocating other stack locations to reduce stack - /// size. - virtual bool enableStackSlotScavenging(const MachineFunction &MF) const { - return false; - } - - /// emitProlog/emitEpilog - These methods insert prolog and epilog code into - /// the function. - virtual void emitPrologue(MachineFunction &MF, - MachineBasicBlock &MBB) const = 0; - virtual void emitEpilogue(MachineFunction &MF, - MachineBasicBlock &MBB) const = 0; - - /// Replace a StackProbe stub (if any) with the actual probe code inline - virtual void inlineStackProbe(MachineFunction &MF, - MachineBasicBlock &PrologueMBB) const {} - - /// Adjust the prologue to have the function use segmented stacks. This works - /// by adding a check even before the "normal" function prologue. 
- virtual void adjustForSegmentedStacks(MachineFunction &MF, - MachineBasicBlock &PrologueMBB) const {} - - /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in - /// the assembly prologue to explicitly handle the stack. - virtual void adjustForHiPEPrologue(MachineFunction &MF, - MachineBasicBlock &PrologueMBB) const {} - - /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee - /// saved registers and returns true if it isn't possible / profitable to do - /// so by issuing a series of store instructions via - /// storeRegToStackSlot(). Returns false otherwise. - virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, - const std::vector &CSI, - const TargetRegisterInfo *TRI) const { - return false; - } - - /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee - /// saved registers and returns true if it isn't possible / profitable to do - /// so by issuing a series of load instructions via loadRegToStackSlot(). - /// If it returns true, and any of the registers in CSI is not restored, - /// it sets the corresponding Restored flag in CSI to false. - /// Returns false otherwise. - virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, - std::vector &CSI, - const TargetRegisterInfo *TRI) const { - return false; - } - - /// Return true if the target needs to disable frame pointer elimination. - virtual bool noFramePointerElim(const MachineFunction &MF) const; - - /// hasFP - Return true if the specified function should have a dedicated - /// frame pointer register. For most targets this is true only if the function - /// has variable sized allocas or if frame pointer elimination is disabled. - virtual bool hasFP(const MachineFunction &MF) const = 0; - - /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is - /// not required, we reserve argument space for call sites in the function - /// immediately on entry to the current function. This eliminates the need for - /// add/sub sp brackets around call sites. Returns true if the call frame is - /// included as part of the stack frame. - virtual bool hasReservedCallFrame(const MachineFunction &MF) const { - return !hasFP(MF); - } - - /// canSimplifyCallFramePseudos - When possible, it's best to simplify the - /// call frame pseudo ops before doing frame index elimination. This is - /// possible only when frame index references between the pseudos won't - /// need adjusting for the call frame adjustments. Normally, that's true - /// if the function has a reserved call frame or a frame pointer. Some - /// targets (Thumb2, for example) may have more complicated criteria, - /// however, and can override this behavior. - virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const { - return hasReservedCallFrame(MF) || hasFP(MF); - } - - // needsFrameIndexResolution - Do we need to perform FI resolution for - // this function. Normally, this is required only when the function - // has any stack objects. However, targets may want to override this. - virtual bool needsFrameIndexResolution(const MachineFunction &MF) const; - - /// getFrameIndexReference - This method should return the base register - /// and offset used to reference a frame index location. The offset is - /// returned directly, and the base register is returned via FrameReg. 
- virtual int getFrameIndexReference(const MachineFunction &MF, int FI, - unsigned &FrameReg) const; - - /// Same as \c getFrameIndexReference, except that the stack pointer (as - /// opposed to the frame pointer) will be the preferred value for \p - /// FrameReg. This is generally used for emitting statepoint or EH tables that - /// use offsets from RSP. If \p IgnoreSPUpdates is true, the returned - /// offset is only guaranteed to be valid with respect to the value of SP at - /// the end of the prologue. - virtual int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, - unsigned &FrameReg, - bool IgnoreSPUpdates) const { - // Always safe to dispatch to getFrameIndexReference. - return getFrameIndexReference(MF, FI, FrameReg); - } - - /// This method determines which of the registers reported by - /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved. - /// The default implementation checks populates the \p SavedRegs bitset with - /// all registers which are modified in the function, targets may override - /// this function to save additional registers. - /// This method also sets up the register scavenger ensuring there is a free - /// register or a frameindex available. - virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, - RegScavenger *RS = nullptr) const; - - /// processFunctionBeforeFrameFinalized - This method is called immediately - /// before the specified function's frame layout (MF.getFrameInfo()) is - /// finalized. Once the frame is finalized, MO_FrameIndex operands are - /// replaced with direct constants. This method is optional. - /// - virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF, - RegScavenger *RS = nullptr) const { - } - - virtual unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const { - report_fatal_error("WinEH not implemented for this target"); - } - - /// This method is called during prolog/epilog code insertion to eliminate - /// call frame setup and destroy pseudo instructions (but only if the Target - /// is using them). It is responsible for eliminating these instructions, - /// replacing them with concrete instructions. This method need only be - /// implemented if using call frame setup/destroy pseudo instructions. - /// Returns an iterator pointing to the instruction after the replaced one. - virtual MachineBasicBlock::iterator - eliminateCallFramePseudoInstr(MachineFunction &MF, - MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI) const { - llvm_unreachable("Call Frame Pseudo Instructions do not exist on this " - "target!"); - } - - - /// Order the symbols in the local stack frame. - /// The list of objects that we want to order is in \p objectsToAllocate as - /// indices into the MachineFrameInfo. The array can be reordered in any way - /// upon return. The contents of the array, however, may not be modified (i.e. - /// only their order may be changed). - /// By default, just maintain the original order. - virtual void - orderFrameObjects(const MachineFunction &MF, - SmallVectorImpl &objectsToAllocate) const { - } - - /// Check whether or not the given \p MBB can be used as a prologue - /// for the target. - /// The prologue will be inserted first in this basic block. - /// This method is used by the shrink-wrapping pass to decide if - /// \p MBB will be correctly handled by the target. - /// As soon as the target enable shrink-wrapping without overriding - /// this method, we assume that each basic block is a valid - /// prologue. 
- virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const { - return true; - } - - /// Check whether or not the given \p MBB can be used as a epilogue - /// for the target. - /// The epilogue will be inserted before the first terminator of that block. - /// This method is used by the shrink-wrapping pass to decide if - /// \p MBB will be correctly handled by the target. - /// As soon as the target enable shrink-wrapping without overriding - /// this method, we assume that each basic block is a valid - /// epilogue. - virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const { - return true; - } - - /// Check if given function is safe for not having callee saved registers. - /// This is used when interprocedural register allocation is enabled. - static bool isSafeForNoCSROpt(const Function *F) { - if (!F->hasLocalLinkage() || F->hasAddressTaken() || - !F->hasFnAttribute(Attribute::NoRecurse)) - return false; - // Function should not be optimized as tail call. - for (const User *U : F->users()) - if (auto CS = ImmutableCallSite(U)) - if (CS.isTailCall()) - return false; - return true; - } -}; - -} // End llvm namespace - -#endif -- cgit v1.2.1 From f1b2e0b26a4eac07e92c73c5aeaac14f83724198 Mon Sep 17 00:00:00 2001 From: Sean Fertile Date: Sat, 4 Nov 2017 01:54:20 +0000 Subject: Revert "[LTO][ThinLTO] Use the linker resolutions to mark global values ..." Changes more tests then expected on one of the build bots. reverting to investigate. This reverts https://llvm.org/svn/llvm-project/llvm/trunk@317374 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317395 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/ModuleSummaryIndex.h | 12 ++---------- include/llvm/IR/ModuleSummaryIndexYAML.h | 8 +++----- 2 files changed, 5 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h index b1e58a2a0d9b..2d664f41e3ce 100644 --- a/include/llvm/IR/ModuleSummaryIndex.h +++ b/include/llvm/IR/ModuleSummaryIndex.h @@ -148,15 +148,11 @@ public: /// In combined summary, indicate that the global value is live. unsigned Live : 1; - /// Indicates that the linker resolved the symbol to a definition from - /// within the same linkage unit. - unsigned DSOLocal : 1; - /// Convenience Constructors explicit GVFlags(GlobalValue::LinkageTypes Linkage, - bool NotEligibleToImport, bool Live, bool IsLocal) + bool NotEligibleToImport, bool Live) : Linkage(Linkage), NotEligibleToImport(NotEligibleToImport), - Live(Live), DSOLocal(IsLocal) {} + Live(Live) {} }; private: @@ -233,10 +229,6 @@ public: void setLive(bool Live) { Flags.Live = Live; } - void setDSOLocal(bool Local) { Flags.DSOLocal = Local; } - - bool isDSOLocal() const { return Flags.DSOLocal; } - /// Flag that this global value cannot be imported. 
void setNotEligibleToImport() { Flags.NotEligibleToImport = true; } diff --git a/include/llvm/IR/ModuleSummaryIndexYAML.h b/include/llvm/IR/ModuleSummaryIndexYAML.h index 4687f2d53e7e..2f9990ca03d8 100644 --- a/include/llvm/IR/ModuleSummaryIndexYAML.h +++ b/include/llvm/IR/ModuleSummaryIndexYAML.h @@ -135,7 +135,7 @@ template <> struct MappingTraits { struct FunctionSummaryYaml { unsigned Linkage; - bool NotEligibleToImport, Live, IsLocal; + bool NotEligibleToImport, Live; std::vector TypeTests; std::vector TypeTestAssumeVCalls, TypeCheckedLoadVCalls; @@ -177,7 +177,6 @@ template <> struct MappingTraits { io.mapOptional("Linkage", summary.Linkage); io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport); io.mapOptional("Live", summary.Live); - io.mapOptional("Local", summary.IsLocal); io.mapOptional("TypeTests", summary.TypeTests); io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls); io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls); @@ -212,7 +211,7 @@ template <> struct CustomMappingTraits { Elem.SummaryList.push_back(llvm::make_unique( GlobalValueSummary::GVFlags( static_cast(FSum.Linkage), - FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal), + FSum.NotEligibleToImport, FSum.Live), 0, FunctionSummary::FFlags{}, ArrayRef{}, ArrayRef{}, std::move(FSum.TypeTests), std::move(FSum.TypeTestAssumeVCalls), @@ -229,8 +228,7 @@ template <> struct CustomMappingTraits { FSums.push_back(FunctionSummaryYaml{ FSum->flags().Linkage, static_cast(FSum->flags().NotEligibleToImport), - static_cast(FSum->flags().Live), - static_cast(FSum->flags().DSOLocal), FSum->type_tests(), + static_cast(FSum->flags().Live), FSum->type_tests(), FSum->type_test_assume_vcalls(), FSum->type_checked_load_vcalls(), FSum->type_test_assume_const_vcalls(), FSum->type_checked_load_const_vcalls()}); -- cgit v1.2.1 From dcf1ffe8a0867a311092f2379195e9b646e42c1d Mon Sep 17 00:00:00 2001 From: Sean Fertile Date: Sat, 4 Nov 2017 17:04:39 +0000 Subject: [LTO][ThinLTO] Use the linker resolutions to mark global values as dso_local. Now that we have a way to mark GlobalValues as local we can use the symbol resolutions that the linker plugin provides as part of lto/thinlto link step to refine the compilers view on what symbols will end up being local. Originally commited as r317374, but reverted in r317395 to update some missed tests. Differential Revision: https://reviews.llvm.org/D35702 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317408 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/ModuleSummaryIndex.h | 12 ++++++++++-- include/llvm/IR/ModuleSummaryIndexYAML.h | 8 +++++--- 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h index 2d664f41e3ce..b1e58a2a0d9b 100644 --- a/include/llvm/IR/ModuleSummaryIndex.h +++ b/include/llvm/IR/ModuleSummaryIndex.h @@ -148,11 +148,15 @@ public: /// In combined summary, indicate that the global value is live. unsigned Live : 1; + /// Indicates that the linker resolved the symbol to a definition from + /// within the same linkage unit. 
+ unsigned DSOLocal : 1; + /// Convenience Constructors explicit GVFlags(GlobalValue::LinkageTypes Linkage, - bool NotEligibleToImport, bool Live) + bool NotEligibleToImport, bool Live, bool IsLocal) : Linkage(Linkage), NotEligibleToImport(NotEligibleToImport), - Live(Live) {} + Live(Live), DSOLocal(IsLocal) {} }; private: @@ -229,6 +233,10 @@ public: void setLive(bool Live) { Flags.Live = Live; } + void setDSOLocal(bool Local) { Flags.DSOLocal = Local; } + + bool isDSOLocal() const { return Flags.DSOLocal; } + /// Flag that this global value cannot be imported. void setNotEligibleToImport() { Flags.NotEligibleToImport = true; } diff --git a/include/llvm/IR/ModuleSummaryIndexYAML.h b/include/llvm/IR/ModuleSummaryIndexYAML.h index 2f9990ca03d8..4687f2d53e7e 100644 --- a/include/llvm/IR/ModuleSummaryIndexYAML.h +++ b/include/llvm/IR/ModuleSummaryIndexYAML.h @@ -135,7 +135,7 @@ template <> struct MappingTraits { struct FunctionSummaryYaml { unsigned Linkage; - bool NotEligibleToImport, Live; + bool NotEligibleToImport, Live, IsLocal; std::vector TypeTests; std::vector TypeTestAssumeVCalls, TypeCheckedLoadVCalls; @@ -177,6 +177,7 @@ template <> struct MappingTraits { io.mapOptional("Linkage", summary.Linkage); io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport); io.mapOptional("Live", summary.Live); + io.mapOptional("Local", summary.IsLocal); io.mapOptional("TypeTests", summary.TypeTests); io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls); io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls); @@ -211,7 +212,7 @@ template <> struct CustomMappingTraits { Elem.SummaryList.push_back(llvm::make_unique( GlobalValueSummary::GVFlags( static_cast(FSum.Linkage), - FSum.NotEligibleToImport, FSum.Live), + FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal), 0, FunctionSummary::FFlags{}, ArrayRef{}, ArrayRef{}, std::move(FSum.TypeTests), std::move(FSum.TypeTestAssumeVCalls), @@ -228,7 +229,8 @@ template <> struct CustomMappingTraits { FSums.push_back(FunctionSummaryYaml{ FSum->flags().Linkage, static_cast(FSum->flags().NotEligibleToImport), - static_cast(FSum->flags().Live), FSum->type_tests(), + static_cast(FSum->flags().Live), + static_cast(FSum->flags().DSOLocal), FSum->type_tests(), FSum->type_test_assume_vcalls(), FSum->type_checked_load_vcalls(), FSum->type_test_assume_const_vcalls(), FSum->type_checked_load_const_vcalls()}); -- cgit v1.2.1 From 3639f551552c1f692fae397fce7f09a410b776cb Mon Sep 17 00:00:00 2001 From: Aaron Ballman Date: Sat, 4 Nov 2017 19:59:14 +0000 Subject: Move the srpm, ocaml_make_directory, llvm_vcsrevision_h, and llvm-headers projects into the Misc folder on IDEs like Visual Studio rather than leave them in the root directory. NFC. 
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317416 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Support/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/llvm/Support/CMakeLists.txt b/include/llvm/Support/CMakeLists.txt index 6104382c3e46..bf662c77351d 100644 --- a/include/llvm/Support/CMakeLists.txt +++ b/include/llvm/Support/CMakeLists.txt @@ -40,3 +40,4 @@ set_source_files_properties("${version_inc}" HEADER_FILE_ONLY TRUE) add_custom_target(llvm_vcsrevision_h DEPENDS "${version_inc}") +set_target_properties(llvm_vcsrevision_h PROPERTIES FOLDER "Misc") -- cgit v1.2.1 From 73fca5770735619630e7aa73cd2139420e140e27 Mon Sep 17 00:00:00 2001 From: Harlan Haskins Date: Sat, 4 Nov 2017 20:31:20 +0000 Subject: Use code voice for DIBuilder in LLVM C API (This is a test commit) git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317422 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm-c/DebugInfo.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/llvm-c/DebugInfo.h b/include/llvm-c/DebugInfo.h index a27b351577a9..d17c690be4da 100644 --- a/include/llvm-c/DebugInfo.h +++ b/include/llvm-c/DebugInfo.h @@ -151,7 +151,7 @@ LLVMDIBuilderRef LLVMCreateDIBuilderDisallowUnresolved(LLVMModuleRef M); LLVMDIBuilderRef LLVMCreateDIBuilder(LLVMModuleRef M); /** - * Deallocates the DIBuilder and everything it owns. + * Deallocates the \c DIBuilder and everything it owns. * @note You must call \c LLVMDIBuilderFinalize before this */ void LLVMDisposeDIBuilder(LLVMDIBuilderRef Builder); @@ -199,7 +199,7 @@ LLVMMetadataRef LLVMDIBuilderCreateCompileUnit( /** * Create a file descriptor to hold debugging information for a file. - * \param Builder The DIBuilder. + * \param Builder The \c DIBuilder. * \param Filename File name. * \param FilenameLen The length of the C string passed to \c Filename. * \param Directory Directory. -- cgit v1.2.1 From c8200b76884d6c8166cbe114131009ebde31b6f6 Mon Sep 17 00:00:00 2001 From: "David L. Jones" Date: Mon, 6 Nov 2017 00:32:01 +0000 Subject: [PassManager, SimplifyCFG] Revert r316908 and r316869. These cause Clang to crash with a segfault. See PR35210 for details. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317444 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Transforms/Scalar/SimplifyCFG.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/llvm/Transforms/Scalar/SimplifyCFG.h b/include/llvm/Transforms/Scalar/SimplifyCFG.h index ed6b1b1853b1..e955673283ec 100644 --- a/include/llvm/Transforms/Scalar/SimplifyCFG.h +++ b/include/llvm/Transforms/Scalar/SimplifyCFG.h @@ -31,16 +31,16 @@ class SimplifyCFGPass : public PassInfoMixin { SimplifyCFGOptions Options; public: - /// The default constructor sets the pass options to create canonical IR, - /// rather than optimal IR. That is, by default we bypass transformations that - /// are likely to improve performance but make analysis for other passes more - /// difficult. + /// The default constructor sets the pass options to create optimal IR, + /// rather than canonical IR. That is, by default we do transformations that + /// are likely to improve performance but make analysis more difficult. + /// FIXME: This is inverted from what most instantiations of the pass should + /// be. 
SimplifyCFGPass() : SimplifyCFGPass(SimplifyCFGOptions() - .forwardSwitchCondToPhi(false) - .convertSwitchToLookupTable(false) - .needCanonicalLoops(true)) {} - + .forwardSwitchCondToPhi(true) + .convertSwitchToLookupTable(true) + .needCanonicalLoops(false)) {} /// Construct a pass with optional optimizations. SimplifyCFGPass(const SimplifyCFGOptions &PassOptions); -- cgit v1.2.1 From 8738ed486fb9f07e8dc177e2879cfd612771573a Mon Sep 17 00:00:00 2001 From: Martin Storsjo Date: Mon, 6 Nov 2017 07:20:58 +0000 Subject: [ObjectYAML] Map relocation types for COFF ARMNT and ARM64 Differential Revision: https://reviews.llvm.org/D39668 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317459 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/ObjectYAML/COFFYAML.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/llvm/ObjectYAML/COFFYAML.h b/include/llvm/ObjectYAML/COFFYAML.h index bbceefac3d94..1fce46c125f7 100644 --- a/include/llvm/ObjectYAML/COFFYAML.h +++ b/include/llvm/ObjectYAML/COFFYAML.h @@ -157,6 +157,16 @@ struct ScalarEnumerationTraits { static void enumeration(IO &IO, COFF::RelocationTypeAMD64 &Value); }; +template <> +struct ScalarEnumerationTraits { + static void enumeration(IO &IO, COFF::RelocationTypesARM &Value); +}; + +template <> +struct ScalarEnumerationTraits { + static void enumeration(IO &IO, COFF::RelocationTypesARM64 &Value); +}; + template <> struct ScalarEnumerationTraits { static void enumeration(IO &IO, COFF::WindowsSubsystem &Value); -- cgit v1.2.1 From 00e900afdbd5dc97330de6bc0b8b09db1dcac9f7 Mon Sep 17 00:00:00 2001 From: Sanjay Patel Date: Mon, 6 Nov 2017 16:27:15 +0000 Subject: [IR] redefine 'UnsafeAlgebra' / 'reassoc' fast-math-flags and add 'trans' fast-math-flag As discussed on llvm-dev: http://lists.llvm.org/pipermail/llvm-dev/2016-November/107104.html and again more recently: http://lists.llvm.org/pipermail/llvm-dev/2017-October/118118.html ...this is a step in cleaning up our fast-math-flags implementation in IR to better match the capabilities of both clang's user-visible flags and the backend's flags for SDNode. As proposed in the above threads, we're replacing the 'UnsafeAlgebra' bit (which had the 'umbrella' meaning that all flags are set) with a new bit that only applies to algebraic reassociation - 'AllowReassoc'. We're also adding a bit to allow approximations for library functions called 'ApproxFunc' (this was initially proposed as 'libm' or similar). ...and we're out of bits. 7 bits ought to be enough for anyone, right? :) FWIW, I did look at getting this out of SubclassOptionalData via SubclassData (spacious 16-bits), but that's apparently already used for other purposes. Also, I don't think we can just add a field to FPMathOperator because Operator is not intended to be instantiated. We'll defer movement of FMF to another day. We keep the 'fast' keyword. I thought about removing that, but seeing IR like this: %f.fast = fadd reassoc nnan ninf nsz arcp contract afn float %op1, %op2 ...made me think we want to keep the shortcut synonym. Finally, this change is binary incompatible with existing IR as seen in the compatibility tests. This statement: "Newer releases can ignore features from older releases, but they cannot miscompile them. For example, if nsw is ever replaced with something else, dropping it would be a valid way to upgrade the IR." 
( http://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility ) ...provides the flexibility we want to make this change without requiring a new IR version. Ie, we're not loosening the FP strictness of existing IR. At worst, we will fail to optimize some previously 'fast' code because it's no longer recognized as 'fast'. This should get fixed as we audit/squash all of the uses of 'isFast()'. Note: an inter-dependent clang commit to use the new API name should closely follow commit. Differential Revision: https://reviews.llvm.org/D39304 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317488 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/Instruction.h | 24 +++++-- include/llvm/IR/Operator.h | 113 +++++++++++++++++++----------- include/llvm/Transforms/Utils/LoopUtils.h | 6 +- 3 files changed, 93 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/include/llvm/IR/Instruction.h b/include/llvm/IR/Instruction.h index 66b1e7e01fe4..41f379b87c23 100644 --- a/include/llvm/IR/Instruction.h +++ b/include/llvm/IR/Instruction.h @@ -308,10 +308,15 @@ public: /// Determine whether the exact flag is set. bool isExact() const; - /// Set or clear the unsafe-algebra flag on this instruction, which must be an + /// Set or clear all fast-math-flags on this instruction, which must be an /// operator which supports this flag. See LangRef.html for the meaning of /// this flag. - void setHasUnsafeAlgebra(bool B); + void setFast(bool B); + + /// Set or clear the reassociation flag on this instruction, which must be + /// an operator which supports this flag. See LangRef.html for the meaning of + /// this flag. + void setHasAllowReassoc(bool B); /// Set or clear the no-nans flag on this instruction, which must be an /// operator which supports this flag. See LangRef.html for the meaning of @@ -333,6 +338,11 @@ public: /// this flag. void setHasAllowReciprocal(bool B); + /// Set or clear the approximate-math-functions flag on this instruction, + /// which must be an operator which supports this flag. See LangRef.html for + /// the meaning of this flag. + void setHasApproxFunc(bool B); + /// Convenience function for setting multiple fast-math flags on this /// instruction, which must be an operator which supports these flags. See /// LangRef.html for the meaning of these flags. @@ -343,8 +353,11 @@ public: /// LangRef.html for the meaning of these flags. void copyFastMathFlags(FastMathFlags FMF); - /// Determine whether the unsafe-algebra flag is set. - bool hasUnsafeAlgebra() const; + /// Determine whether all fast-math-flags are set. + bool isFast() const; + + /// Determine whether the allow-reassociation flag is set. + bool hasAllowReassoc() const; /// Determine whether the no-NaNs flag is set. bool hasNoNaNs() const; @@ -361,6 +374,9 @@ public: /// Determine whether the allow-contract flag is set. bool hasAllowContract() const; + /// Determine whether the approximate-math-functions flag is set. + bool hasApproxFunc() const; + /// Convenience function for getting all the fast-math flags, which must be an /// operator which supports these flags. See LangRef.html for the meaning of /// these flags. diff --git a/include/llvm/IR/Operator.h b/include/llvm/IR/Operator.h index ae9255174a31..01746e4b6a29 100644 --- a/include/llvm/IR/Operator.h +++ b/include/llvm/IR/Operator.h @@ -163,52 +163,61 @@ private: unsigned Flags = 0; - FastMathFlags(unsigned F) : Flags(F) { } + FastMathFlags(unsigned F) { + // If all 7 bits are set, turn this into -1. 
If the number of bits grows, + // this must be updated. This is intended to provide some forward binary + // compatibility insurance for the meaning of 'fast' in case bits are added. + if (F == 0x7F) Flags = ~0U; + else Flags = F; + } public: - /// This is how the bits are used in Value::SubclassOptionalData so they - /// should fit there too. + // This is how the bits are used in Value::SubclassOptionalData so they + // should fit there too. + // WARNING: We're out of space. SubclassOptionalData only has 7 bits. New + // functionality will require a change in how this information is stored. enum { - UnsafeAlgebra = (1 << 0), + AllowReassoc = (1 << 0), NoNaNs = (1 << 1), NoInfs = (1 << 2), NoSignedZeros = (1 << 3), AllowReciprocal = (1 << 4), - AllowContract = (1 << 5) + AllowContract = (1 << 5), + ApproxFunc = (1 << 6) }; FastMathFlags() = default; - /// Whether any flag is set bool any() const { return Flags != 0; } + bool none() const { return Flags == 0; } + bool all() const { return Flags == ~0U; } - /// Set all the flags to false void clear() { Flags = 0; } + void set() { Flags = ~0U; } /// Flag queries + bool allowReassoc() const { return 0 != (Flags & AllowReassoc); } bool noNaNs() const { return 0 != (Flags & NoNaNs); } bool noInfs() const { return 0 != (Flags & NoInfs); } bool noSignedZeros() const { return 0 != (Flags & NoSignedZeros); } bool allowReciprocal() const { return 0 != (Flags & AllowReciprocal); } - bool allowContract() const { return 0 != (Flags & AllowContract); } - bool unsafeAlgebra() const { return 0 != (Flags & UnsafeAlgebra); } + bool allowContract() const { return 0 != (Flags & AllowContract); } + bool approxFunc() const { return 0 != (Flags & ApproxFunc); } + /// 'Fast' means all bits are set. + bool isFast() const { return all(); } /// Flag setters + void setAllowReassoc() { Flags |= AllowReassoc; } void setNoNaNs() { Flags |= NoNaNs; } void setNoInfs() { Flags |= NoInfs; } void setNoSignedZeros() { Flags |= NoSignedZeros; } void setAllowReciprocal() { Flags |= AllowReciprocal; } + // TODO: Change the other set* functions to take a parameter? void setAllowContract(bool B) { Flags = (Flags & ~AllowContract) | B * AllowContract; } - void setUnsafeAlgebra() { - Flags |= UnsafeAlgebra; - setNoNaNs(); - setNoInfs(); - setNoSignedZeros(); - setAllowReciprocal(); - setAllowContract(true); - } + void setApproxFunc() { Flags |= ApproxFunc; } + void setFast() { set(); } void operator&=(const FastMathFlags &OtherFlags) { Flags &= OtherFlags.Flags; @@ -221,18 +230,21 @@ class FPMathOperator : public Operator { private: friend class Instruction; - void setHasUnsafeAlgebra(bool B) { + /// 'Fast' means all bits are set. 
+ void setFast(bool B) { + setHasAllowReassoc(B); + setHasNoNaNs(B); + setHasNoInfs(B); + setHasNoSignedZeros(B); + setHasAllowReciprocal(B); + setHasAllowContract(B); + setHasApproxFunc(B); + } + + void setHasAllowReassoc(bool B) { SubclassOptionalData = - (SubclassOptionalData & ~FastMathFlags::UnsafeAlgebra) | - (B * FastMathFlags::UnsafeAlgebra); - - // Unsafe algebra implies all the others - if (B) { - setHasNoNaNs(true); - setHasNoInfs(true); - setHasNoSignedZeros(true); - setHasAllowReciprocal(true); - } + (SubclassOptionalData & ~FastMathFlags::AllowReassoc) | + (B * FastMathFlags::AllowReassoc); } void setHasNoNaNs(bool B) { @@ -265,6 +277,12 @@ private: (B * FastMathFlags::AllowContract); } + void setHasApproxFunc(bool B) { + SubclassOptionalData = + (SubclassOptionalData & ~FastMathFlags::ApproxFunc) | + (B * FastMathFlags::ApproxFunc); + } + /// Convenience function for setting multiple fast-math flags. /// FMF is a mask of the bits to set. void setFastMathFlags(FastMathFlags FMF) { @@ -278,42 +296,53 @@ private: } public: - /// Test whether this operation is permitted to be - /// algebraically transformed, aka the 'A' fast-math property. - bool hasUnsafeAlgebra() const { - return (SubclassOptionalData & FastMathFlags::UnsafeAlgebra) != 0; + /// Test if this operation allows all non-strict floating-point transforms. + bool isFast() const { + return ((SubclassOptionalData & FastMathFlags::AllowReassoc) != 0 && + (SubclassOptionalData & FastMathFlags::NoNaNs) != 0 && + (SubclassOptionalData & FastMathFlags::NoInfs) != 0 && + (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0 && + (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0 && + (SubclassOptionalData & FastMathFlags::AllowContract) != 0 && + (SubclassOptionalData & FastMathFlags::ApproxFunc) != 0); + } + + /// Test if this operation may be simplified with reassociative transforms. + bool hasAllowReassoc() const { + return (SubclassOptionalData & FastMathFlags::AllowReassoc) != 0; } - /// Test whether this operation's arguments and results are to be - /// treated as non-NaN, aka the 'N' fast-math property. + /// Test if this operation's arguments and results are assumed not-NaN. bool hasNoNaNs() const { return (SubclassOptionalData & FastMathFlags::NoNaNs) != 0; } - /// Test whether this operation's arguments and results are to be - /// treated as NoN-Inf, aka the 'I' fast-math property. + /// Test if this operation's arguments and results are assumed not-infinite. bool hasNoInfs() const { return (SubclassOptionalData & FastMathFlags::NoInfs) != 0; } - /// Test whether this operation can treat the sign of zero - /// as insignificant, aka the 'S' fast-math property. + /// Test if this operation can ignore the sign of zero. bool hasNoSignedZeros() const { return (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0; } - /// Test whether this operation is permitted to use - /// reciprocal instead of division, aka the 'R' fast-math property. + /// Test if this operation can use reciprocal multiply instead of division. bool hasAllowReciprocal() const { return (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0; } - /// Test whether this operation is permitted to - /// be floating-point contracted. + /// Test if this operation can be floating-point contracted (FMA). bool hasAllowContract() const { return (SubclassOptionalData & FastMathFlags::AllowContract) != 0; } + /// Test if this operation allows approximations of math library functions or + /// intrinsics. 
+ bool hasApproxFunc() const { + return (SubclassOptionalData & FastMathFlags::ApproxFunc) != 0; + } + /// Convenience function for getting all the fast-math flags FastMathFlags getFastMathFlags() const { return FastMathFlags(SubclassOptionalData); diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h index 650224610ad2..a59b188f8d6c 100644 --- a/include/llvm/Transforms/Utils/LoopUtils.h +++ b/include/llvm/Transforms/Utils/LoopUtils.h @@ -331,15 +331,13 @@ public: /// not have the "fast-math" property. Such operation requires a relaxed FP /// mode. bool hasUnsafeAlgebra() { - return InductionBinOp && - !cast(InductionBinOp)->hasUnsafeAlgebra(); + return InductionBinOp && !cast(InductionBinOp)->isFast(); } /// Returns induction operator that does not have "fast-math" property /// and requires FP unsafe mode. Instruction *getUnsafeAlgebraInst() { - if (!InductionBinOp || - cast(InductionBinOp)->hasUnsafeAlgebra()) + if (!InductionBinOp || cast(InductionBinOp)->isFast()) return nullptr; return InductionBinOp; } -- cgit v1.2.1 From 4cbab70b62524aeaa820e691ffe770dd42027856 Mon Sep 17 00:00:00 2001 From: Bjorn Pettersson Date: Mon, 6 Nov 2017 21:46:06 +0000 Subject: [MIRPrinter] Use %subreg.xxx syntax for subregister index operands Summary: Print %subreg. instead of just the subregister index when printing immediate operands corresponding to subreg indices in INSERT_SUBREG, EXTRACT_SUBREG, SUBREG_TO_REG and REG_SEQUENCE. Reviewers: qcolombet, MatzeB Reviewed By: MatzeB Subscribers: nhaehnle, javed.absar, llvm-commits Differential Revision: https://reviews.llvm.org/D39696 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317513 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/MachineInstr.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h index 7523825285a6..88a697055e83 100644 --- a/include/llvm/CodeGen/MachineInstr.h +++ b/include/llvm/CodeGen/MachineInstr.h @@ -301,6 +301,21 @@ public: return Operands[i]; } + /// Return true if operand \p OpIdx is a subregister index. + bool isOperandSubregIdx(unsigned OpIdx) const { + assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate && + "Expected MO_Immediate operand type."); + if (isExtractSubreg() && OpIdx == 2) + return true; + if (isInsertSubreg() && OpIdx == 3) + return true; + if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0) + return true; + if (isSubregToReg() && OpIdx == 3) + return true; + return false; + } + /// Returns the number of non-implicit operands. unsigned getNumExplicitOperands() const; -- cgit v1.2.1 From 0ffa8796b2c006cd03f9dce0273e5a099befb870 Mon Sep 17 00:00:00 2001 From: Vedant Kumar Date: Mon, 6 Nov 2017 23:15:21 +0000 Subject: [DebugInfo] Unify logic to merge DILocations. NFC. This makes DILocation::getMergedLocation() do what its comment says it does when merging locations for an Instruction: set the common inlineAt scope. This simplifies Instruction::applyMergedLocation() a bit. 
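For illustration only (an editorial sketch, not part of the original commit message): the pattern a transform that folds two instructions into one can use after this change. The helper below is hypothetical; it assumes "using namespace llvm;" and the usual IR headers.

    // NewI replaces both A and B, so give it a merged debug location.
    // For Instructions this routes through DILocation::getMergedLocation(),
    // which now also selects the common inlined-at scope.
    static void takeMergedLocation(Instruction &NewI, const Instruction &A,
                                   const Instruction &B) {
      NewI.applyMergedLocation(A.getDebugLoc(), B.getDebugLoc());
    }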
Testing: check-llvm, check-clang Differential Revision: https://reviews.llvm.org/D39628 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317524 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/DebugInfoMetadata.h | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h index bee8cf8a39d9..b1f08e7c67cd 100644 --- a/include/llvm/IR/DebugInfoMetadata.h +++ b/include/llvm/IR/DebugInfoMetadata.h @@ -1419,19 +1419,15 @@ public: /// represented in a single line entry. In this case, no location /// should be set, unless the merged instruction is a call, which we will /// set the merged debug location as line 0 of the nearest common scope - /// where 2 locations are inlined from. This only applies to Instruction, - /// For MachineInstruction, as it is post-inline, we will treat the call + /// where 2 locations are inlined from. This only applies to Instruction; + /// for MachineInstruction, as it is post-inline, we will treat the call /// instruction the same way as other instructions. /// - /// This should only be used by MachineInstruction because call can be - /// treated the same as other instructions. Otherwise, use - /// \p applyMergedLocation instead. - static const DILocation *getMergedLocation(const DILocation *LocA, - const DILocation *LocB) { - if (LocA && LocB && (LocA == LocB || !LocA->canDiscriminate(*LocB))) - return LocA; - return nullptr; - } + /// \p ForInst: The Instruction the merged DILocation is for. If the + /// Instruction is unavailable or non-existent, use nullptr. + static const DILocation * + getMergedLocation(const DILocation *LocA, const DILocation *LocB, + const Instruction *ForInst = nullptr); /// Returns the base discriminator for a given encoded discriminator \p D. static unsigned getBaseDiscriminatorFromDiscriminator(unsigned D) { -- cgit v1.2.1 From 964a48a5b6b8f8b3ca36dc4f769ba64d0a56aa57 Mon Sep 17 00:00:00 2001 From: Davide Italiano Date: Tue, 7 Nov 2017 00:09:25 +0000 Subject: [IPO/LowerTypesTest] Skip blockaddress(es) when replacing uses. Blockaddresses refer to the function itself, therefore replacing them would cause an assertion in doRAUW. Fixes https://bugs.llvm.org/show_bug.cgi?id=35201 This was found when trying CFI on a proprietary kernel by Dmitry Mikulin. Differential Revision: https://reviews.llvm.org/D39695 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317527 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/Value.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/llvm/IR/Value.h b/include/llvm/IR/Value.h index 9e4914973edf..f50f01726859 100644 --- a/include/llvm/IR/Value.h +++ b/include/llvm/IR/Value.h @@ -299,6 +299,12 @@ public: /// values or constant users. void replaceUsesOutsideBlock(Value *V, BasicBlock *BB); + /// replaceUsesExceptBlockAddr - Go through the uses list for this definition + /// and make each use point to "V" instead of "this" when the use is outside + /// the block. 'This's use list is expected to have at least one element. + /// Unlike replaceAllUsesWith this function skips blockaddr uses. + void replaceUsesExceptBlockAddr(Value *New); + //---------------------------------------------------------------------- // Methods for handling the chain of uses of this Value. 
// -- cgit v1.2.1 From 0227fe59a98b795b40e1e4b1b08cbe373d4c54c7 Mon Sep 17 00:00:00 2001 From: Adrian Prantl Date: Tue, 7 Nov 2017 00:45:34 +0000 Subject: Make DIExpression::createFragmentExpression() return an Optional. We can't safely split arithmetic into multiple fragments because we can't express carry-over between fragments. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317534 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/DebugInfoMetadata.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h index b1f08e7c67cd..c515f6de2d8c 100644 --- a/include/llvm/IR/DebugInfoMetadata.h +++ b/include/llvm/IR/DebugInfoMetadata.h @@ -2306,9 +2306,11 @@ public: /// /// \param OffsetInBits Offset of the piece in bits. /// \param SizeInBits Size of the piece in bits. - static DIExpression *createFragmentExpression(const DIExpression *Exp, - unsigned OffsetInBits, - unsigned SizeInBits); + /// \return Creating a fragment expression may fail if \c Expr + /// contains arithmetic operations that would be truncated. + static Optional + createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, + unsigned SizeInBits); }; /// Global variables. -- cgit v1.2.1 From b79469ca2fbe21e9e0b9b85f61711cdbfb44b17a Mon Sep 17 00:00:00 2001 From: Kristof Beyls Date: Tue, 7 Nov 2017 10:34:34 +0000 Subject: [GlobalISel] Enable legalizing non-power-of-2 sized types. This changes the interface of how targets describe how to legalize, see the below description. 1. Interface for targets to describe how to legalize. In GlobalISel, the API in the LegalizerInfo class is the main interface for targets to specify which types are legal for which operations, and what to do to turn illegal type/operation combinations into legal ones. For each operation the type sizes that can be legalized without having to change the size of the type are specified with a call to setAction. This isn't different to how GlobalISel worked before. For example, for a target that supports 32 and 64 bit adds natively: for (auto Ty : {s32, s64}) setAction({G_ADD, 0, s32}, Legal); or for a target that needs a library call for a 32 bit division: setAction({G_SDIV, s32}, Libcall); The main conceptual change to the LegalizerInfo API, is in specifying how to legalize the type sizes for which a change of size is needed. For example, in the above example, how to specify how all types from i1 to i8388607 (apart from s32 and s64 which are legal) need to be legalized and expressed in terms of operations on the available legal sizes (again, i32 and i64 in this case). Before, the implementation only allowed specifying power-of-2-sized types (e.g. setAction({G_ADD, 0, s128}, NarrowScalar). A worse limitation was that if you'd wanted to specify how to legalize all the sized types as allowed by the LLVM-IR LangRef, i1 to i8388607, you'd have to call setAction 8388607-3 times and probably would need a lot of memory to store all of these specifications. Instead, the legalization actions that need to change the size of the type are specified now using a "SizeChangeStrategy". For example: setLegalizeScalarToDifferentSizeStrategy( G_ADD, 0, widenToLargerAndNarrowToLargest); This example indicates that for type sizes for which there is a larger size that can be legalized towards, do it by Widening the size. For example, G_ADD on s17 will be legalized by first doing WidenScalar to make it s32, after which it's legal. 
The "NarrowToLargest" indicates what to do if there is no larger size that can be legalized towards. E.g. G_ADD on s92 will be legalized by doing NarrowScalar to s64. Another example, taken from the ARM backend is: for (unsigned Op : {G_SDIV, G_UDIV}) { setLegalizeScalarToDifferentSizeStrategy(Op, 0, widenToLargerTypesUnsupportedOtherwise); if (ST.hasDivideInARMMode()) setAction({Op, s32}, Legal); else setAction({Op, s32}, Libcall); } For this example, G_SDIV on s8, on a target without a divide instruction, would be legalized by first doing action (WidenScalar, s32), followed by (Libcall, s32). The same principle is also followed for when the number of vector lanes on vector data types need to be changed, e.g.: setAction({G_ADD, LLT::vector(8, 8)}, LegalizerInfo::Legal); setAction({G_ADD, LLT::vector(16, 8)}, LegalizerInfo::Legal); setAction({G_ADD, LLT::vector(4, 16)}, LegalizerInfo::Legal); setAction({G_ADD, LLT::vector(8, 16)}, LegalizerInfo::Legal); setAction({G_ADD, LLT::vector(2, 32)}, LegalizerInfo::Legal); setAction({G_ADD, LLT::vector(4, 32)}, LegalizerInfo::Legal); setLegalizeVectorElementToDifferentSizeStrategy( G_ADD, 0, widenToLargerTypesUnsupportedOtherwise); As currently implemented here, vector types are legalized by first making the vector element size legal, followed by then making the number of lanes legal. The strategy to follow in the first step is set by a call to setLegalizeVectorElementToDifferentSizeStrategy, see example above. The strategy followed in the second step "moreToWiderTypesAndLessToWidest" (see code for its definition), indicating that vectors are widened to more elements so they map to natively supported vector widths, or when there isn't a legal wider vector, split the vector to map it to the widest vector supported. Therefore, for the above specification, some example legalizations are: * getAction({G_ADD, LLT::vector(3, 3)}) returns {WidenScalar, LLT::vector(3, 8)} * getAction({G_ADD, LLT::vector(3, 8)}) then returns {MoreElements, LLT::vector(8, 8)} * getAction({G_ADD, LLT::vector(20, 8)}) returns {FewerElements, LLT::vector(16, 8)} 2. Key implementation aspects. How to legalize a specific (operation, type index, size) tuple is represented by mapping intervals of integers representing a range of size types to an action to take, e.g.: setScalarAction({G_ADD, LLT:scalar(1)}, {{1, WidenScalar}, // bit sizes [ 1, 31[ {32, Legal}, // bit sizes [32, 33[ {33, WidenScalar}, // bit sizes [33, 64[ {64, Legal}, // bit sizes [64, 65[ {65, NarrowScalar} // bit sizes [65, +inf[ }); Please note that most of the code to do the actual lowering of non-power-of-2 sized types is currently missing, this is just trying to make it possible for targets to specify what is legal, and how non-legal types should be legalized. Probably quite a bit of further work is needed in the actual legalizing and the other passes in GlobalISel to support non-power-of-2 sized types. I hope the documentation in LegalizerInfo.h and the examples provided in the various {Target}LegalizerInfo.cpp and LegalizerInfoTest.cpp explains well enough how this is meant to be used. This drops the need for LLT::{half,double}...Size(). 
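To make the interface above concrete, here is a condensed, editorial sketch (not part of the original commit message) of a hypothetical target's LegalizerInfo constructor. The opcode and type choices are invented for the example, and MyTargetLegalizerInfo is assumed to derive from LegalizerInfo:

    MyTargetLegalizerInfo::MyTargetLegalizerInfo() {
      using namespace TargetOpcode;
      const LLT s32 = LLT::scalar(32);
      const LLT s64 = LLT::scalar(64);

      // Scalar sizes the target supports natively.
      for (auto Ty : {s32, s64})
        setAction({G_ADD, Ty}, Legal);

      // Every other scalar size: widen to the next larger legal size, or
      // narrow to s64 when there is no larger legal size.
      setLegalizeScalarToDifferentSizeStrategy(
          G_ADD, 0, widenToLargerTypesAndNarrowToLargest);

      computeTables();
    }

With this in place, getAction({G_ADD, 0, LLT::scalar(17)}) would report a WidenScalar step towards s32, and getAction({G_ADD, 0, LLT::scalar(92)}) a NarrowScalar step towards s64, matching the s17/s92 examples above.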
Differential Revision: https://reviews.llvm.org/D30529 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317560 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/GlobalISel/LegalizerInfo.h | 377 +++++++++++++++++++----- include/llvm/Support/LowLevelTypeImpl.h | 45 --- 2 files changed, 302 insertions(+), 120 deletions(-) (limited to 'include') diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h index b229411c8148..4055ab112912 100644 --- a/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h +++ b/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h @@ -26,6 +26,7 @@ #include #include #include +#include namespace llvm { @@ -120,27 +121,144 @@ public: } } + typedef std::pair SizeAndAction; + typedef std::vector SizeAndActionsVec; + using SizeChangeStrategy = + std::function; + /// More friendly way to set an action for common types that have an LLT /// representation. + /// The LegalizeAction must be one for which NeedsLegalizingToDifferentSize + /// returns false. void setAction(const InstrAspect &Aspect, LegalizeAction Action) { + assert(!needsLegalizingToDifferentSize(Action)); TablesInitialized = false; - unsigned Opcode = Aspect.Opcode - FirstOp; - if (Actions[Opcode].size() <= Aspect.Idx) - Actions[Opcode].resize(Aspect.Idx + 1); - Actions[Aspect.Opcode - FirstOp][Aspect.Idx][Aspect.Type] = Action; + const unsigned OpcodeIdx = Aspect.Opcode - FirstOp; + if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx) + SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1); + SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action; } - /// If an operation on a given vector type (say ) isn't explicitly - /// specified, we proceed in 2 stages. First we legalize the underlying scalar - /// (so that there's at least one legal vector with that scalar), then we - /// adjust the number of elements in the vector so that it is legal. The - /// desired action in the first step is controlled by this function. - void setScalarInVectorAction(unsigned Opcode, LLT ScalarTy, - LegalizeAction Action) { - assert(!ScalarTy.isVector()); - ScalarInVectorActions[std::make_pair(Opcode, ScalarTy)] = Action; + /// The setAction calls record the non-size-changing legalization actions + /// to take on specificly-sized types. The SizeChangeStrategy defines what + /// to do when the size of the type needs to be changed to reach a legally + /// sized type (i.e., one that was defined through a setAction call). + /// e.g. + /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal); + /// setLegalizeScalarToDifferentSizeStrategy( + /// G_ADD, 0, widenToLargerTypesAndNarrowToLargest); + /// will end up defining getAction({G_ADD, 0, T}) to return the following + /// actions for different scalar types T: + /// LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)} + /// LLT::scalar(32): {Legal, 0, LLT::scalar(32)} + /// LLT::scalar(33)..: {NarrowScalar, 0, LLT::scalar(32)} + /// + /// If no SizeChangeAction gets defined, through this function, + /// the default is unsupportedForDifferentSizes. + void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode, + const unsigned TypeIdx, + SizeChangeStrategy S) { + const unsigned OpcodeIdx = Opcode - FirstOp; + if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx) + ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1); + ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S; + } + + /// See also setLegalizeScalarToDifferentSizeStrategy. 
+ /// This function allows to set the SizeChangeStrategy for vector elements. + void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode, + const unsigned TypeIdx, + SizeChangeStrategy S) { + const unsigned OpcodeIdx = Opcode - FirstOp; + if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx) + VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1); + VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S; + } + + /// A SizeChangeStrategy for the common case where legalization for a + /// particular operation consists of only supporting a specific set of type + /// sizes. E.g. + /// setAction ({G_DIV, 0, LLT::scalar(32)}, Legal); + /// setAction ({G_DIV, 0, LLT::scalar(64)}, Legal); + /// setLegalizeScalarToDifferentSizeStrategy( + /// G_DIV, 0, unsupportedForDifferentSizes); + /// will result in getAction({G_DIV, 0, T}) to return Legal for s32 and s64, + /// and Unsupported for all other scalar types T. + static SizeAndActionsVec + unsupportedForDifferentSizes(const SizeAndActionsVec &v) { + return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported, + Unsupported); + } + + /// A SizeChangeStrategy for the common case where legalization for a + /// particular operation consists of widening the type to a large legal type, + /// unless there is no such type and then instead it should be narrowed to the + /// largest legal type. + static SizeAndActionsVec + widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) { + assert(v.size() > 0 && + "At least one size that can be legalized towards is needed" + " for this SizeChangeStrategy"); + return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar, + NarrowScalar); + } + + static SizeAndActionsVec + widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) { + return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar, + Unsupported); + } + + static SizeAndActionsVec + narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) { + return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar, + Unsupported); + } + + static SizeAndActionsVec + narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) { + assert(v.size() > 0 && + "At least one size that can be legalized towards is needed" + " for this SizeChangeStrategy"); + return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar, + WidenScalar); + } + + /// A SizeChangeStrategy for the common case where legalization for a + /// particular vector operation consists of having more elements in the + /// vector, to a type that is legal. Unless there is no such type and then + /// instead it should be legalized towards the widest vector that's still + /// legal. E.g. + /// setAction({G_ADD, LLT::vector(8, 8)}, Legal); + /// setAction({G_ADD, LLT::vector(16, 8)}, Legal); + /// setAction({G_ADD, LLT::vector(2, 32)}, Legal); + /// setAction({G_ADD, LLT::vector(4, 32)}, Legal); + /// setLegalizeVectorElementToDifferentSizeStrategy( + /// G_ADD, 0, moreToWiderTypesAndLessToWidest); + /// will result in the following getAction results: + /// * getAction({G_ADD, LLT::vector(8,8)}) returns + /// (Legal, vector(8,8)). + /// * getAction({G_ADD, LLT::vector(9,8)}) returns + /// (MoreElements, vector(16,8)). + /// * getAction({G_ADD, LLT::vector(8,32)}) returns + /// (FewerElements, vector(4,32)). 
+ static SizeAndActionsVec + moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) { + return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements, + FewerElements); } + /// Helper function to implement many typical SizeChangeStrategy functions. + static SizeAndActionsVec + increaseToLargerTypesAndDecreaseToLargest(const SizeAndActionsVec &v, + LegalizeAction IncreaseAction, + LegalizeAction DecreaseAction); + /// Helper function to implement many typical SizeChangeStrategy functions. + static SizeAndActionsVec + decreaseToSmallerTypesAndIncreaseToSmallest(const SizeAndActionsVec &v, + LegalizeAction DecreaseAction, + LegalizeAction IncreaseAction); + /// Determine what action should be taken to legalize the given generic /// instruction opcode, type-index and type. Requires computeTables to have /// been called. @@ -158,58 +276,6 @@ public: std::tuple getAction(const MachineInstr &MI, const MachineRegisterInfo &MRI) const; - /// Iterate the given function (typically something like doubling the width) - /// on Ty until we find a legal type for this operation. - Optional findLegalizableSize(const InstrAspect &Aspect, - function_ref NextType) const { - if (Aspect.Idx >= Actions[Aspect.Opcode - FirstOp].size()) - return None; - - LegalizeAction Action; - const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx]; - LLT Ty = Aspect.Type; - do { - Ty = NextType(Ty); - auto ActionIt = Map.find(Ty); - if (ActionIt == Map.end()) { - auto DefaultIt = DefaultActions.find(Aspect.Opcode); - if (DefaultIt == DefaultActions.end()) - return None; - Action = DefaultIt->second; - } else - Action = ActionIt->second; - } while (needsLegalizingToDifferentSize(Action)); - return Ty; - } - - /// Find what type it's actually OK to perform the given operation on, given - /// the general approach we've decided to take. - Optional findLegalType(const InstrAspect &Aspect, LegalizeAction Action) const; - - std::pair findLegalAction(const InstrAspect &Aspect, - LegalizeAction Action) const { - auto LegalType = findLegalType(Aspect, Action); - if (!LegalType) - return std::make_pair(LegalizeAction::Unsupported, LLT()); - return std::make_pair(Action, *LegalType); - } - - /// Find the specified \p Aspect in the primary (explicitly set) Actions - /// table. Returns either the action the target requested or NotFound if there - /// was no setAction call. - LegalizeAction findInActions(const InstrAspect &Aspect) const { - if (Aspect.Opcode < FirstOp || Aspect.Opcode > LastOp) - return NotFound; - if (Aspect.Idx >= Actions[Aspect.Opcode - FirstOp].size()) - return NotFound; - const TypeMap &Map = Actions[Aspect.Opcode - FirstOp][Aspect.Idx]; - auto ActionIt = Map.find(Aspect.Type); - if (ActionIt == Map.end()) - return NotFound; - - return ActionIt->second; - } - bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const; virtual bool legalizeCustom(MachineInstr &MI, @@ -217,20 +283,181 @@ public: MachineIRBuilder &MIRBuilder) const; private: - static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START; - static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END; + /// The SizeAndActionsVec is a representation mapping between all natural + /// numbers and an Action. The natural number represents the bit size of + /// the InstrAspect. 
For example, for a target with native support for 32-bit + /// and 64-bit additions, you'd express that as: + /// setScalarAction(G_ADD, 0, + /// {{1, WidenScalar}, // bit sizes [ 1, 31[ + /// {32, Legal}, // bit sizes [32, 33[ + /// {33, WidenScalar}, // bit sizes [33, 64[ + /// {64, Legal}, // bit sizes [64, 65[ + /// {65, NarrowScalar} // bit sizes [65, +inf[ + /// }); + /// It may be that only 64-bit pointers are supported on your target: + /// setPointerAction(G_GEP, 0, LLT:pointer(1), + /// {{1, Unsupported}, // bit sizes [ 1, 63[ + /// {64, Legal}, // bit sizes [64, 65[ + /// {65, Unsupported}, // bit sizes [65, +inf[ + /// }); + void setScalarAction(const unsigned Opcode, const unsigned TypeIndex, + const SizeAndActionsVec &SizeAndActions) { + const unsigned OpcodeIdx = Opcode - FirstOp; + SmallVector &Actions = ScalarActions[OpcodeIdx]; + setActions(TypeIndex, Actions, SizeAndActions); + } + void setPointerAction(const unsigned Opcode, const unsigned TypeIndex, + const unsigned AddressSpace, + const SizeAndActionsVec &SizeAndActions) { + const unsigned OpcodeIdx = Opcode - FirstOp; + if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) == + AddrSpace2PointerActions[OpcodeIdx].end()) + AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}}; + SmallVector &Actions = + AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second; + setActions(TypeIndex, Actions, SizeAndActions); + } + + /// If an operation on a given vector type (say ) isn't explicitly + /// specified, we proceed in 2 stages. First we legalize the underlying scalar + /// (so that there's at least one legal vector with that scalar), then we + /// adjust the number of elements in the vector so that it is legal. The + /// desired action in the first step is controlled by this function. + void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex, + const SizeAndActionsVec &SizeAndActions) { + unsigned OpcodeIdx = Opcode - FirstOp; + SmallVector &Actions = + ScalarInVectorActions[OpcodeIdx]; + setActions(TypeIndex, Actions, SizeAndActions); + } + + /// See also setScalarInVectorAction. + /// This function let's you specify the number of elements in a vector that + /// are legal for a legal element size. + void setVectorNumElementAction(const unsigned Opcode, + const unsigned TypeIndex, + const unsigned ElementSize, + const SizeAndActionsVec &SizeAndActions) { + const unsigned OpcodeIdx = Opcode - FirstOp; + if (NumElements2Actions[OpcodeIdx].find(ElementSize) == + NumElements2Actions[OpcodeIdx].end()) + NumElements2Actions[OpcodeIdx][ElementSize] = {{}}; + SmallVector &Actions = + NumElements2Actions[OpcodeIdx].find(ElementSize)->second; + setActions(TypeIndex, Actions, SizeAndActions); + } + + /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes, + /// i.e. it's OK if it doesn't start from size 1. + static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) { +#ifndef NDEBUG + // The sizes should be in increasing order + int prev_size = -1; + for(auto SizeAndAction: v) { + assert(SizeAndAction.first > prev_size); + prev_size = SizeAndAction.first; + } + // - for every Widen action, there should be a larger bitsize that + // can be legalized towards (e.g. Legal, Lower, Libcall or Custom + // action). + // - for every Narrow action, there should be a smaller bitsize that + // can be legalized towards. 
+ int SmallestNarrowIdx = -1; + int LargestWidenIdx = -1; + int SmallestLegalizableToSameSizeIdx = -1; + int LargestLegalizableToSameSizeIdx = -1; + for(size_t i=0; i SmallestLegalizableToSameSizeIdx); + } + if (LargestWidenIdx != -1) + assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx); +#endif + } - using TypeMap = DenseMap; - using SIVActionMap = DenseMap, LegalizeAction>; + /// A full SizeAndActionsVec must cover all bit sizes, i.e. must start with + /// from size 1. + static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) { +#ifndef NDEBUG + // Data structure invariant: The first bit size must be size 1. + assert(v.size() >= 1); + assert(v[0].first == 1); + checkPartialSizeAndActionsVector(v); +#endif + } + + /// Sets actions for all bit sizes on a particular generic opcode, type + /// index and scalar or pointer type. + void setActions(unsigned TypeIndex, + SmallVector &Actions, + const SizeAndActionsVec &SizeAndActions) { + checkFullSizeAndActionsVector(SizeAndActions); + if (Actions.size() <= TypeIndex) + Actions.resize(TypeIndex + 1); + Actions[TypeIndex] = SizeAndActions; + } - SmallVector Actions[LastOp - FirstOp + 1]; - SIVActionMap ScalarInVectorActions; - DenseMap, uint16_t> MaxLegalVectorElts; - DenseMap DefaultActions; + static SizeAndAction findAction(const SizeAndActionsVec &Vec, + const uint32_t Size); + + /// Returns the next action needed to get the scalar or pointer type closer + /// to being legal + /// E.g. findLegalAction({G_REM, 13}) should return + /// (WidenScalar, 32). After that, findLegalAction({G_REM, 32}) will + /// probably be called, which should return (Lower, 32). + /// This is assuming the setScalarAction on G_REM was something like: + /// setScalarAction(G_REM, 0, + /// {{1, WidenScalar}, // bit sizes [ 1, 31[ + /// {32, Lower}, // bit sizes [32, 33[ + /// {33, NarrowScalar} // bit sizes [65, +inf[ + /// }); + std::pair + findScalarLegalAction(const InstrAspect &Aspect) const; + + /// Returns the next action needed towards legalizing the vector type. + std::pair + findVectorLegalAction(const InstrAspect &Aspect) const; + + static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START; + static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END; - bool TablesInitialized = false; + // Data structures used temporarily during construction of legality data: + typedef DenseMap TypeMap; + SmallVector SpecifiedActions[LastOp - FirstOp + 1]; + SmallVector + ScalarSizeChangeStrategies[LastOp - FirstOp + 1]; + SmallVector + VectorElementSizeChangeStrategies[LastOp - FirstOp + 1]; + bool TablesInitialized; + + // Data structures used by getAction: + SmallVector ScalarActions[LastOp - FirstOp + 1]; + SmallVector ScalarInVectorActions[LastOp - FirstOp + 1]; + std::unordered_map> + AddrSpace2PointerActions[LastOp - FirstOp + 1]; + std::unordered_map> + NumElements2Actions[LastOp - FirstOp + 1]; }; -} // end namespace llvm +} // end namespace llvm. #endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H diff --git a/include/llvm/Support/LowLevelTypeImpl.h b/include/llvm/Support/LowLevelTypeImpl.h index c79dd0c29507..099fa4618997 100644 --- a/include/llvm/Support/LowLevelTypeImpl.h +++ b/include/llvm/Support/LowLevelTypeImpl.h @@ -137,51 +137,6 @@ public: return scalar(getScalarSizeInBits()); } - /// Get a low-level type with half the size of the original, by halving the - /// size of the scalar type involved. For example `s32` will become `s16`, - /// `<2 x s32>` will become `<2 x s16>`. 
- LLT halfScalarSize() const { - assert(!IsPointer && getScalarSizeInBits() > 1 && - getScalarSizeInBits() % 2 == 0 && "cannot half size of this type"); - return LLT{/*isPointer=*/false, IsVector ? true : false, - IsVector ? getNumElements() : (uint16_t)0, - getScalarSizeInBits() / 2, /*AddressSpace=*/0}; - } - - /// Get a low-level type with twice the size of the original, by doubling the - /// size of the scalar type involved. For example `s32` will become `s64`, - /// `<2 x s32>` will become `<2 x s64>`. - LLT doubleScalarSize() const { - assert(!IsPointer && "cannot change size of this type"); - return LLT{/*isPointer=*/false, IsVector ? true : false, - IsVector ? getNumElements() : (uint16_t)0, - getScalarSizeInBits() * 2, /*AddressSpace=*/0}; - } - - /// Get a low-level type with half the size of the original, by halving the - /// number of vector elements of the scalar type involved. The source must be - /// a vector type with an even number of elements. For example `<4 x s32>` - /// will become `<2 x s32>`, `<2 x s32>` will become `s32`. - LLT halfElements() const { - assert(isVector() && getNumElements() % 2 == 0 && "cannot half odd vector"); - if (getNumElements() == 2) - return scalar(getScalarSizeInBits()); - - return LLT{/*isPointer=*/false, /*isVector=*/true, - (uint16_t)(getNumElements() / 2), getScalarSizeInBits(), - /*AddressSpace=*/0}; - } - - /// Get a low-level type with twice the size of the original, by doubling the - /// number of vector elements of the scalar type involved. The source must be - /// a vector type. For example `<2 x s32>` will become `<4 x s32>`. Doubling - /// the number of elements in sN produces <2 x sN>. - LLT doubleElements() const { - return LLT{IsPointer ? true : false, /*isVector=*/true, - (uint16_t)(getNumElements() * 2), getScalarSizeInBits(), - IsPointer ? getAddressSpace() : 0}; - } - void print(raw_ostream &OS) const; bool operator==(const LLT &RHS) const { -- cgit v1.2.1 From 8cec6c49168a1b9f49f1391c4b4750a7271ff354 Mon Sep 17 00:00:00 2001 From: Petar Jovanovic Date: Tue, 7 Nov 2017 14:40:27 +0000 Subject: Reland "Correct dwarf unwind information in function epilogue for X86" Reland r317100 with minor fix regarding ComputeCommonTailLength function in BranchFolding.cpp. Skipping top CFI instructions block needs to executed on several more return points in ComputeCommonTailLength(). Original r317100 message: "Correct dwarf unwind information in function epilogue for X86" This patch aims to provide correct dwarf unwind information in function epilogue for X86. It consists of two parts. The first part inserts CFI instructions that set appropriate cfa offset and cfa register in emitEpilogue() in X86FrameLowering. This part is X86 specific. The second part is platform independent and ensures that: - CFI instructions do not affect code generation - Unwind information remains correct when a function is modified by different passes. This is done in a late pass by analyzing information about cfa offset and cfa register in BBs and inserting additional CFI directives where necessary. 
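To make the first, target-specific half more concrete: a frame-lowering implementation typically materializes such a directive as a CFI_INSTRUCTION pseudo that refers to an MCCFIInstruction attached to the function. The helper below is only a sketch under that assumption; its name and the DwarfStackPtr parameter are placeholders, not the actual X86FrameLowering code.

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/MC/MCDwarf.h"
    #include "llvm/Target/TargetInstrInfo.h"
    #include "llvm/Target/TargetOpcodes.h"

    // Emit ".cfi_def_cfa_register <SP>" before MBBI, e.g. right after the
    // frame pointer has been popped in an epilogue.
    static void emitDefCfaRegister(llvm::MachineBasicBlock &MBB,
                                   llvm::MachineBasicBlock::iterator MBBI,
                                   const llvm::DebugLoc &DL,
                                   const llvm::TargetInstrInfo &TII,
                                   unsigned DwarfStackPtr) {
      llvm::MachineFunction &MF = *MBB.getParent();
      unsigned CFIIndex = MF.addFrameInst(
          llvm::MCCFIInstruction::createDefCfaRegister(nullptr, DwarfStackPtr));
      llvm::BuildMI(MBB, MBBI, DL,
                    TII.get(llvm::TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }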
Changed CFI instructions so that they: - are duplicable - are not counted as instructions when tail duplicating or tail merging - can be compared as equal Added CFIInstrInserter pass: - analyzes each basic block to determine cfa offset and register valid at its entry and exit - verifies that outgoing cfa offset and register of predecessor blocks match incoming values of their successors - inserts additional CFI directives at basic block beginning to correct the rule for calculating CFA Having CFI instructions in function epilogue can cause incorrect CFA calculation rule for some basic blocks. This can happen if, due to basic block reordering, or the existence of multiple epilogue blocks, some of the blocks have wrong cfa offset and register values set by the epilogue block above them. CFIInstrInserter is currently run only on X86, but can be used by any target that implements support for adding CFI instructions in epilogue. Patch by Violeta Vukobrat. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317579 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/CodeGen/Passes.h | 3 +++ include/llvm/CodeGen/TargetFrameLowering.h | 8 ++++++++ include/llvm/InitializePasses.h | 1 + include/llvm/Target/Target.td | 2 +- 4 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index c106ff6cdfef..bf35b7d653b7 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -420,6 +420,9 @@ namespace llvm { // This pass expands memcmp() to load/stores. FunctionPass *createExpandMemCmpPass(); + /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp + FunctionPass *createCFIInstrInserter(); + } // End llvm namespace #endif diff --git a/include/llvm/CodeGen/TargetFrameLowering.h b/include/llvm/CodeGen/TargetFrameLowering.h index 5cf4627f3c96..a94dbd7c5c00 100644 --- a/include/llvm/CodeGen/TargetFrameLowering.h +++ b/include/llvm/CodeGen/TargetFrameLowering.h @@ -341,6 +341,14 @@ public: return false; return true; } + + /// Return initial CFA offset value i.e. the one valid at the beginning of the + /// function (before any stack operations). + virtual int getInitialCFAOffset(const MachineFunction &MF) const; + + /// Return initial CFA register value i.e. the one valid at the beginning of + /// the function (before any stack operations). 
+ virtual unsigned getInitialCFARegister(const MachineFunction &MF) const; }; } // End llvm namespace diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index 9cdb49330ae1..7616534d8d5b 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -86,6 +86,7 @@ void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&); void initializeCFGPrinterLegacyPassPass(PassRegistry&); void initializeCFGSimplifyPassPass(PassRegistry&); void initializeCFGViewerLegacyPassPass(PassRegistry&); +void initializeCFIInstrInserterPass(PassRegistry&); void initializeCFLAndersAAWrapperPassPass(PassRegistry&); void initializeCFLSteensAAWrapperPassPass(PassRegistry&); void initializeCallGraphDOTPrinterPass(PassRegistry&); diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index 048bd1f2a0cc..927da1c72d46 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -902,7 +902,7 @@ def CFI_INSTRUCTION : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; - let isNotDuplicable = 1; + let isNotDuplicable = 0; } def EH_LABEL : Instruction { let OutOperandList = (outs); -- cgit v1.2.1 From e9d757c19ed1063bc4fb2984da86342c06e1dd4d Mon Sep 17 00:00:00 2001 From: Paul Robinson Date: Tue, 7 Nov 2017 19:57:12 +0000 Subject: [DWARFv5] Support DW_FORM_strp in the .debug_line header. Supporting this form in .debug_line.dwo will be done as a follow-up. Differential Revision: https://reviews.llvm.org/D33155 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317607 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/DebugInfo/DWARF/DWARFDebugLine.h | 8 +++++--- include/llvm/DebugInfo/DWARF/DWARFFormValue.h | 14 +++++--------- 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h b/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h index a4d8c0dd716d..f89bcf82fee6 100644 --- a/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h +++ b/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h @@ -22,6 +22,7 @@ namespace llvm { +class DWARFUnit; class raw_ostream; class DWARFDebugLine { @@ -95,7 +96,8 @@ public: void clear(); void dump(raw_ostream &OS) const; - bool parse(const DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr); + bool parse(const DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr, + const DWARFUnit *U = nullptr); }; /// Standard .debug_line state machine structure. @@ -218,7 +220,7 @@ public: /// Parse prologue and all rows. bool parse(const DWARFDataExtractor &DebugLineData, uint32_t *OffsetPtr, - raw_ostream *OS = nullptr); + const DWARFUnit *U, raw_ostream *OS = nullptr); using RowVector = std::vector; using RowIter = RowVector::const_iterator; @@ -236,7 +238,7 @@ public: const LineTable *getLineTable(uint32_t Offset) const; const LineTable *getOrParseLineTable(const DWARFDataExtractor &DebugLineData, - uint32_t Offset); + uint32_t Offset, const DWARFUnit *U); private: struct ParsingState { diff --git a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h index 497fe591c967..d32053519ec4 100644 --- a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h +++ b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h @@ -104,16 +104,12 @@ public: const DWARFUnit *getUnit() const { return U; } void dump(raw_ostream &OS, DIDumpOptions DumpOpts = DIDumpOptions()) const; - /// Extracts a value in \p Data at offset \p *OffsetPtr. 
- /// - /// The passed DWARFUnit is allowed to be nullptr, in which case some - /// kind of forms that depend on Unit information are disallowed. - /// \param Data The DWARFDataExtractor to use. - /// \param OffsetPtr The offset within \p Data where the data starts. - /// \param U The optional DWARFUnit supplying information for some forms. - /// \returns whether the extraction succeeded. + /// Extracts a value in \p Data at offset \p *OffsetPtr. The information + /// in \p FormParams is needed to interpret some forms. The optional + /// \p Unit allows extracting information if the form refers to other + /// sections (e.g., .debug_str). bool extractValue(const DWARFDataExtractor &Data, uint32_t *OffsetPtr, - const DWARFUnit *U); + DWARFFormParams FormParams, const DWARFUnit *U = nullptr); bool isInlinedCStr() const { return Value.data != nullptr && Value.data == (const uint8_t *)Value.cstr; -- cgit v1.2.1 From 56fec39d44a4b10ddcb8963f636bd350abc892b0 Mon Sep 17 00:00:00 2001 From: Mitch Phillips Date: Tue, 7 Nov 2017 21:16:46 +0000 Subject: Extend SpecialCaseList to allow users to blame matches on entries in the file. Summary: Extends SCL functionality to allow users to find the line number in the file the SCL is built from through SpecialCaseList::inSectionBlame(...). Also removes the need to compile the SCL before use. As the matcher now contains a list of regexes to test against instead of a single regex, the regexes can be individually built on each insertion rather than one large compilation at the end of construction. This change also fixes a bug where blank lines would cause the parser to become out-of-sync with the line number. An error on line `k` was being reported as being on line `k - num_blank_lines_before_k`. Note: This change has a cyclical dependency on D39486. Both these changes must be submitted at the same time to avoid a build breakage. Reviewers: vlad.tsyrklevich Reviewed By: vlad.tsyrklevich Subscribers: kcc, pcc, llvm-commits Differential Revision: https://reviews.llvm.org/D39485 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317617 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Support/SpecialCaseList.h | 37 ++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/llvm/Support/SpecialCaseList.h b/include/llvm/Support/SpecialCaseList.h index f76ca305efb8..fd62fc48047b 100644 --- a/include/llvm/Support/SpecialCaseList.h +++ b/include/llvm/Support/SpecialCaseList.h @@ -89,6 +89,17 @@ public: bool inSection(StringRef Section, StringRef Prefix, StringRef Query, StringRef Category = StringRef()) const; + /// Returns the line number corresponding to the special case list entry if + /// the special case list contains a line + /// \code + /// @Prefix:=@Category + /// \endcode + /// where @Query satisfies wildcard expression in a given @Section. + /// Returns zero if there is no blacklist entry corresponding to this + /// expression. + unsigned inSectionBlame(StringRef Section, StringRef Prefix, StringRef Query, + StringRef Category = StringRef()) const; + protected: // Implementations of the create*() functions that can also be used by derived // classes. @@ -96,25 +107,25 @@ protected: std::string &Error); bool createInternal(const MemoryBuffer *MB, std::string &Error); + SpecialCaseList() = default; SpecialCaseList(SpecialCaseList const &) = delete; SpecialCaseList &operator=(SpecialCaseList const &) = delete; /// Represents a set of regular expressions. 
Regular expressions which are - /// "literal" (i.e. no regex metacharacters) are stored in Strings, while all - /// others are represented as a single pipe-separated regex in RegEx. The - /// reason for doing so is efficiency; StringSet is much faster at matching + /// "literal" (i.e. no regex metacharacters) are stored in Strings. The + /// reason for doing so is efficiency; StringMap is much faster at matching /// literal strings than Regex. class Matcher { public: - bool insert(std::string Regexp, std::string &REError); - void compile(); - bool match(StringRef Query) const; + bool insert(std::string Regexp, unsigned LineNumber, std::string &REError); + // Returns the line number in the source file that this query matches to. + // Returns zero if no match is found. + unsigned match(StringRef Query) const; private: - StringSet<> Strings; + StringMap Strings; TrigramIndex Trigrams; - std::unique_ptr RegEx; - std::string UncompiledRegEx; + std::vector, unsigned>> RegExes; }; using SectionEntries = StringMap>; @@ -127,19 +138,15 @@ protected: }; std::vector
Sections; - bool IsCompiled; - SpecialCaseList(); /// Parses just-constructed SpecialCaseList entries from a memory buffer. bool parse(const MemoryBuffer *MB, StringMap &SectionsMap, std::string &Error); - /// compile() should be called once, after parsing all the memory buffers. - void compile(); // Helper method for derived classes to search by Prefix, Query, and Category // once they have already resolved a section entry. - bool inSection(const SectionEntries &Entries, StringRef Prefix, - StringRef Query, StringRef Category) const; + unsigned inSectionBlame(const SectionEntries &Entries, StringRef Prefix, + StringRef Query, StringRef Category) const; }; } // namespace llvm -- cgit v1.2.1 From d8660fa5dc39cff9877b4f885d7a4d10aad20a65 Mon Sep 17 00:00:00 2001 From: Justin Lebar Date: Tue, 7 Nov 2017 22:10:54 +0000 Subject: [NVPTX] Implement __nvvm_atom_add_gen_d builtin. Summary: This just seems to have been an oversight. We already supported the f64 atomic add with an explicit scope (e.g. "cta"), but not the scopeless version. Reviewers: tra Subscribers: jholewinski, sanjoy, cfe-commits, llvm-commits, hiraditya Differential Revision: https://reviews.llvm.org/D39638 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317623 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/IntrinsicsNVVM.td | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/IR/IntrinsicsNVVM.td b/include/llvm/IR/IntrinsicsNVVM.td index 7ba1a3eb2e5b..249419d15d3f 100644 --- a/include/llvm/IR/IntrinsicsNVVM.td +++ b/include/llvm/IR/IntrinsicsNVVM.td @@ -683,10 +683,15 @@ let TargetPrefix = "nvvm" in { Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>; -// Atomic not available as an llvm intrinsic. +// Atomics not available as llvm intrinsics. def int_nvvm_atomic_load_add_f32 : Intrinsic<[llvm_float_ty], [LLVMAnyPointerType, llvm_float_ty], [IntrArgMemOnly, NoCapture<0>]>; + // Atomic add of f64 requires sm_60. + def int_nvvm_atomic_load_add_f64 : Intrinsic<[llvm_double_ty], + [LLVMAnyPointerType, llvm_double_ty], + [IntrArgMemOnly, NoCapture<0>]>; + def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty], [LLVMAnyPointerType, llvm_i32_ty], [IntrArgMemOnly, NoCapture<0>]>; -- cgit v1.2.1 From 48319238e40440d4a153ba057e8f1292a73767f4 Mon Sep 17 00:00:00 2001 From: David Blaikie Date: Wed, 8 Nov 2017 01:01:31 +0000 Subject: Target/TargetInstrInfo.h -> CodeGen/TargetInstrInfo.h to match layering This header includes CodeGen headers, and is not, itself, included by any Target headers, so move it into CodeGen to match the layering of its implementation. 
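Going back to the SpecialCaseList change above, the new blame interface can be exercised roughly as follows. The file name and its two-line contents are invented for illustration, and the snippet assumes the existing create() factory that reads the list from disk.

    #include "llvm/Support/SpecialCaseList.h"
    #include <memory>
    #include <string>

    // blacklist.txt (hypothetical contents):
    //   [address_sanitizer]
    //   fun:*bad_function*
    unsigned blameLineFor(llvm::StringRef Symbol) {
      std::string Error;
      std::unique_ptr<llvm::SpecialCaseList> SCL =
          llvm::SpecialCaseList::create({"blacklist.txt"}, Error);
      if (!SCL)
        return 0;
      // With the file above, a query such as "my_bad_function_helper" would be
      // blamed on line 2; an unmatched query yields 0.
      return SCL->inSectionBlame("address_sanitizer", "fun", Symbol);
    }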
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317647 91177308-0d34-0410-b5e6-96231b3b80d8 --- .../CodeGen/GlobalISel/InstructionSelectorImpl.h | 2 +- include/llvm/CodeGen/ResourcePriorityQueue.h | 2 +- include/llvm/CodeGen/TailDuplicator.h | 4 +- include/llvm/CodeGen/TargetInstrInfo.h | 1691 ++++++++++++++++++++ include/llvm/Target/TargetInstrInfo.h | 1691 -------------------- 5 files changed, 1695 insertions(+), 1695 deletions(-) create mode 100644 include/llvm/CodeGen/TargetInstrInfo.h delete mode 100644 include/llvm/Target/TargetInstrInfo.h (limited to 'include') diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h index ae9396d9c219..84b6ec9beead 100644 --- a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h +++ b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h @@ -22,11 +22,11 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/IR/Constants.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetOpcodes.h" #include "llvm/Target/TargetRegisterInfo.h" #include diff --git a/include/llvm/CodeGen/ResourcePriorityQueue.h b/include/llvm/CodeGen/ResourcePriorityQueue.h index 1a4f994259de..cc64e9d572e5 100644 --- a/include/llvm/CodeGen/ResourcePriorityQueue.h +++ b/include/llvm/CodeGen/ResourcePriorityQueue.h @@ -20,8 +20,8 @@ #include "llvm/CodeGen/DFAPacketizer.h" #include "llvm/CodeGen/ScheduleDAG.h" #include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/MC/MCInstrItineraries.h" -#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" namespace llvm { diff --git a/include/llvm/CodeGen/TailDuplicator.h b/include/llvm/CodeGen/TailDuplicator.h index e5f110293c33..ea202b2e4092 100644 --- a/include/llvm/CodeGen/TailDuplicator.h +++ b/include/llvm/CodeGen/TailDuplicator.h @@ -17,12 +17,12 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/STLExtras.h" #include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/Support/CommandLine.h" -#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" #include #include diff --git a/include/llvm/CodeGen/TargetInstrInfo.h b/include/llvm/CodeGen/TargetInstrInfo.h new file mode 100644 index 000000000000..6770e503e615 --- /dev/null +++ b/include/llvm/CodeGen/TargetInstrInfo.h @@ -0,0 +1,1691 @@ +//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the target machine instruction set to the code generator. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETINSTRINFO_H +#define LLVM_TARGET_TARGETINSTRINFO_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/ADT/None.h" +#include "llvm/CodeGen/LiveIntervalAnalysis.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineCombinerPattern.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/PseudoSourceValue.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/Support/BranchProbability.h" +#include "llvm/Support/ErrorHandling.h" +#include +#include +#include +#include +#include + +namespace llvm { + +class DFAPacketizer; +class InstrItineraryData; +class LiveVariables; +class MachineMemOperand; +class MachineRegisterInfo; +class MCAsmInfo; +class MCInst; +struct MCSchedModel; +class Module; +class ScheduleDAG; +class ScheduleHazardRecognizer; +class SDNode; +class SelectionDAG; +class RegScavenger; +class TargetRegisterClass; +class TargetRegisterInfo; +class TargetSchedModel; +class TargetSubtargetInfo; + +template class SmallVectorImpl; + +//--------------------------------------------------------------------------- +/// +/// TargetInstrInfo - Interface to description of machine instruction set +/// +class TargetInstrInfo : public MCInstrInfo { +public: + TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u, + unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u) + : CallFrameSetupOpcode(CFSetupOpcode), + CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode), + ReturnOpcode(ReturnOpcode) {} + TargetInstrInfo(const TargetInstrInfo &) = delete; + TargetInstrInfo &operator=(const TargetInstrInfo &) = delete; + virtual ~TargetInstrInfo(); + + static bool isGenericOpcode(unsigned Opc) { + return Opc <= TargetOpcode::GENERIC_OP_END; + } + + /// Given a machine instruction descriptor, returns the register + /// class constraint for OpNum, or NULL. + const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum, + const TargetRegisterInfo *TRI, + const MachineFunction &MF) const; + + /// Return true if the instruction is trivially rematerializable, meaning it + /// has no side effects and requires no operands that aren't always available. + /// This means the only allowed uses are constants and unallocatable physical + /// registers so that the instructions result is independent of the place + /// in the function. + bool isTriviallyReMaterializable(const MachineInstr &MI, + AliasAnalysis *AA = nullptr) const { + return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF || + (MI.getDesc().isRematerializable() && + (isReallyTriviallyReMaterializable(MI, AA) || + isReallyTriviallyReMaterializableGeneric(MI, AA))); + } + +protected: + /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is + /// set, this hook lets the target specify whether the instruction is actually + /// trivially rematerializable, taking into consideration its operands. This + /// predicate must return false if the instruction has any side effects other + /// than producing a value, or if it requres any address registers that are + /// not always available. + /// Requirements must be check as stated in isTriviallyReMaterializable() . 
+ virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI, + AliasAnalysis *AA) const { + return false; + } + + /// This method commutes the operands of the given machine instruction MI. + /// The operands to be commuted are specified by their indices OpIdx1 and + /// OpIdx2. + /// + /// If a target has any instructions that are commutable but require + /// converting to different instructions or making non-trivial changes + /// to commute them, this method can be overloaded to do that. + /// The default implementation simply swaps the commutable operands. + /// + /// If NewMI is false, MI is modified in place and returned; otherwise, a + /// new machine instruction is created and returned. + /// + /// Do not call this method for a non-commutable instruction. + /// Even though the instruction is commutable, the method may still + /// fail to commute the operands, null pointer is returned in such cases. + virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI, + unsigned OpIdx1, + unsigned OpIdx2) const; + + /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable + /// operand indices to (ResultIdx1, ResultIdx2). + /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be + /// predefined to some indices or be undefined (designated by the special + /// value 'CommuteAnyOperandIndex'). + /// The predefined result indices cannot be re-defined. + /// The function returns true iff after the result pair redefinition + /// the fixed result pair is equal to or equivalent to the source pair of + /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that + /// the pairs (x,y) and (y,x) are equivalent. + static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, + unsigned CommutableOpIdx1, + unsigned CommutableOpIdx2); + +private: + /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is + /// set and the target hook isReallyTriviallyReMaterializable returns false, + /// this function does target-independent tests to determine if the + /// instruction is really trivially rematerializable. + bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI, + AliasAnalysis *AA) const; + +public: + /// These methods return the opcode of the frame setup/destroy instructions + /// if they exist (-1 otherwise). Some targets use pseudo instructions in + /// order to abstract away the difference between operating with a frame + /// pointer and operating without, through the use of these two instructions. + /// + unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } + unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } + + /// Returns true if the argument is a frame pseudo instruction. + bool isFrameInstr(const MachineInstr &I) const { + return I.getOpcode() == getCallFrameSetupOpcode() || + I.getOpcode() == getCallFrameDestroyOpcode(); + } + + /// Returns true if the argument is a frame setup pseudo instruction. + bool isFrameSetup(const MachineInstr &I) const { + return I.getOpcode() == getCallFrameSetupOpcode(); + } + + /// Returns size of the frame associated with the given frame instruction. + /// For frame setup instruction this is frame that is set up space set up + /// after the instruction. For frame destroy instruction this is the frame + /// freed by the caller. + /// Note, in some cases a call frame (or a part of it) may be prepared prior + /// to the frame setup instruction. 
It occurs in the calls that involve + /// inalloca arguments. This function reports only the size of the frame part + /// that is set up between the frame setup and destroy pseudo instructions. + int64_t getFrameSize(const MachineInstr &I) const { + assert(isFrameInstr(I) && "Not a frame instruction"); + assert(I.getOperand(0).getImm() >= 0); + return I.getOperand(0).getImm(); + } + + /// Returns the total frame size, which is made up of the space set up inside + /// the pair of frame start-stop instructions and the space that is set up + /// prior to the pair. + int64_t getFrameTotalSize(const MachineInstr &I) const { + if (isFrameSetup(I)) { + assert(I.getOperand(1).getImm() >= 0 && + "Frame size must not be negative"); + return getFrameSize(I) + I.getOperand(1).getImm(); + } + return getFrameSize(I); + } + + unsigned getCatchReturnOpcode() const { return CatchRetOpcode; } + unsigned getReturnOpcode() const { return ReturnOpcode; } + + /// Returns the actual stack pointer adjustment made by an instruction + /// as part of a call sequence. By default, only call frame setup/destroy + /// instructions adjust the stack, but targets may want to override this + /// to enable more fine-grained adjustment, or adjust by a different value. + virtual int getSPAdjust(const MachineInstr &MI) const; + + /// Return true if the instruction is a "coalescable" extension instruction. + /// That is, it's like a copy where it's legal for the source to overlap the + /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's + /// expected the pre-extension value is available as a subreg of the result + /// register. This also returns the sub-register index in SubIdx. + virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, + unsigned &DstReg, unsigned &SubIdx) const { + return false; + } + + /// If the specified machine instruction is a direct + /// load from a stack slot, return the virtual or physical register number of + /// the destination along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than loading from the stack slot. + virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, + int &FrameIndex) const { + return 0; + } + + /// Check for post-frame ptr elimination stack locations as well. + /// This uses a heuristic so it isn't reliable for correctness. + virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, + int &FrameIndex) const { + return 0; + } + + /// If the specified machine instruction has a load from a stack slot, + /// return true along with the FrameIndex of the loaded stack slot and the + /// machine mem operand containing the reference. + /// If not, return false. Unlike isLoadFromStackSlot, this returns true for + /// any instructions that loads from the stack. This is just a hint, as some + /// cases may be missed. + virtual bool hasLoadFromStackSlot(const MachineInstr &MI, + const MachineMemOperand *&MMO, + int &FrameIndex) const; + + /// If the specified machine instruction is a direct + /// store to a stack slot, return the virtual or physical register number of + /// the source reg along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than storing to the stack slot. 
+ virtual unsigned isStoreToStackSlot(const MachineInstr &MI, + int &FrameIndex) const { + return 0; + } + + /// Check for post-frame ptr elimination stack locations as well. + /// This uses a heuristic, so it isn't reliable for correctness. + virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, + int &FrameIndex) const { + return 0; + } + + /// If the specified machine instruction has a store to a stack slot, + /// return true along with the FrameIndex of the loaded stack slot and the + /// machine mem operand containing the reference. + /// If not, return false. Unlike isStoreToStackSlot, + /// this returns true for any instructions that stores to the + /// stack. This is just a hint, as some cases may be missed. + virtual bool hasStoreToStackSlot(const MachineInstr &MI, + const MachineMemOperand *&MMO, + int &FrameIndex) const; + + /// Return true if the specified machine instruction + /// is a copy of one stack slot to another and has no other effect. + /// Provide the identity of the two frame indices. + virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, + int &SrcFrameIndex) const { + return false; + } + + /// Compute the size in bytes and offset within a stack slot of a spilled + /// register or subregister. + /// + /// \param [out] Size in bytes of the spilled value. + /// \param [out] Offset in bytes within the stack slot. + /// \returns true if both Size and Offset are successfully computed. + /// + /// Not all subregisters have computable spill slots. For example, + /// subregisters registers may not be byte-sized, and a pair of discontiguous + /// subregisters has no single offset. + /// + /// Targets with nontrivial bigendian implementations may need to override + /// this, particularly to support spilled vector registers. + virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, + unsigned &Size, unsigned &Offset, + const MachineFunction &MF) const; + + /// Returns the size in bytes of the specified MachineInstr, or ~0U + /// when this function is not implemented by a target. + virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const { + return ~0U; + } + + /// Return true if the instruction is as cheap as a move instruction. + /// + /// Targets for different archs need to override this, and different + /// micro-architectures can also be finely tuned inside. + virtual bool isAsCheapAsAMove(const MachineInstr &MI) const { + return MI.isAsCheapAsAMove(); + } + + /// Return true if the instruction should be sunk by MachineSink. + /// + /// MachineSink determines on its own whether the instruction is safe to sink; + /// this gives the target a hook to override the default behavior with regards + /// to which instructions should be sunk. + virtual bool shouldSink(const MachineInstr &MI) const { return true; } + + /// Re-issue the specified 'original' instruction at the + /// specific location targeting a new destination register. + /// The register in Orig->getOperand(0).getReg() will be substituted by + /// DestReg:SubIdx. Any existing subreg index is preserved or composed with + /// SubIdx. + virtual void reMaterialize(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, unsigned DestReg, + unsigned SubIdx, const MachineInstr &Orig, + const TargetRegisterInfo &TRI) const; + + /// \brief Clones instruction or the whole instruction bundle \p Orig and + /// insert into \p MBB before \p InsertBefore. The target may update operands + /// that are required to be unique. 
+ /// + /// \p Orig must not return true for MachineInstr::isNotDuplicable(). + virtual MachineInstr &duplicate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator InsertBefore, + const MachineInstr &Orig) const; + + /// This method must be implemented by targets that + /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target + /// may be able to convert a two-address instruction into one or more true + /// three-address instructions on demand. This allows the X86 target (for + /// example) to convert ADD and SHL instructions into LEA instructions if they + /// would require register copies due to two-addressness. + /// + /// This method returns a null pointer if the transformation cannot be + /// performed, otherwise it returns the last new instruction. + /// + virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI, + MachineInstr &MI, + LiveVariables *LV) const { + return nullptr; + } + + // This constant can be used as an input value of operand index passed to + // the method findCommutedOpIndices() to tell the method that the + // corresponding operand index is not pre-defined and that the method + // can pick any commutable operand. + static const unsigned CommuteAnyOperandIndex = ~0U; + + /// This method commutes the operands of the given machine instruction MI. + /// + /// The operands to be commuted are specified by their indices OpIdx1 and + /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value + /// 'CommuteAnyOperandIndex', which means that the method is free to choose + /// any arbitrarily chosen commutable operand. If both arguments are set to + /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable + /// operands; then commutes them if such operands could be found. + /// + /// If NewMI is false, MI is modified in place and returned; otherwise, a + /// new machine instruction is created and returned. + /// + /// Do not call this method for a non-commutable instruction or + /// for non-commuable operands. + /// Even though the instruction is commutable, the method may still + /// fail to commute the operands, null pointer is returned in such cases. + MachineInstr * + commuteInstruction(MachineInstr &MI, bool NewMI = false, + unsigned OpIdx1 = CommuteAnyOperandIndex, + unsigned OpIdx2 = CommuteAnyOperandIndex) const; + + /// Returns true iff the routine could find two commutable operands in the + /// given machine instruction. + /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. + /// If any of the INPUT values is set to the special value + /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable + /// operand, then returns its index in the corresponding argument. + /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method + /// looks for 2 commutable operands. + /// If INPUT values refer to some operands of MI, then the method simply + /// returns true if the corresponding operands are commutable and returns + /// false otherwise. + /// + /// For example, calling this method this way: + /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex; + /// findCommutedOpIndices(MI, Op1, Op2); + /// can be interpreted as a query asking to find an operand that would be + /// commutable with the operand#1. + virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, + unsigned &SrcOpIdx2) const; + + /// A pair composed of a register and a sub-register index. + /// Used to give some type checking when modeling Reg:SubReg. 
+ struct RegSubRegPair { + unsigned Reg; + unsigned SubReg; + + RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0) + : Reg(Reg), SubReg(SubReg) {} + }; + + /// A pair composed of a pair of a register and a sub-register index, + /// and another sub-register index. + /// Used to give some type checking when modeling Reg:SubReg1, SubReg2. + struct RegSubRegPairAndIdx : RegSubRegPair { + unsigned SubIdx; + + RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0, + unsigned SubIdx = 0) + : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {} + }; + + /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI + /// and \p DefIdx. + /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of + /// the list is modeled as . + /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce + /// two elements: + /// - vreg1:sub1, sub0 + /// - vreg2<:0>, sub1 + /// + /// \returns true if it is possible to build such an input sequence + /// with the pair \p MI, \p DefIdx. False otherwise. + /// + /// \pre MI.isRegSequence() or MI.isRegSequenceLike(). + /// + /// \note The generic implementation does not provide any support for + /// MI.isRegSequenceLike(). In other words, one has to override + /// getRegSequenceLikeInputs for target specific instructions. + bool + getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, + SmallVectorImpl &InputRegs) const; + + /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI + /// and \p DefIdx. + /// \p [out] InputReg of the equivalent EXTRACT_SUBREG. + /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce: + /// - vreg1:sub1, sub0 + /// + /// \returns true if it is possible to build such an input sequence + /// with the pair \p MI, \p DefIdx. False otherwise. + /// + /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike(). + /// + /// \note The generic implementation does not provide any support for + /// MI.isExtractSubregLike(). In other words, one has to override + /// getExtractSubregLikeInputs for target specific instructions. + bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, + RegSubRegPairAndIdx &InputReg) const; + + /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI + /// and \p DefIdx. + /// \p [out] BaseReg and \p [out] InsertedReg contain + /// the equivalent inputs of INSERT_SUBREG. + /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce: + /// - BaseReg: vreg0:sub0 + /// - InsertedReg: vreg1:sub1, sub3 + /// + /// \returns true if it is possible to build such an input sequence + /// with the pair \p MI, \p DefIdx. False otherwise. + /// + /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike(). + /// + /// \note The generic implementation does not provide any support for + /// MI.isInsertSubregLike(). In other words, one has to override + /// getInsertSubregLikeInputs for target specific instructions. + bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, + RegSubRegPair &BaseReg, + RegSubRegPairAndIdx &InsertedReg) const; + + /// Return true if two machine instructions would produce identical values. + /// By default, this is only true when the two instructions + /// are deemed identical except for defs. If this function is called when the + /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for + /// aggressive checks. 
+ virtual bool produceSameValue(const MachineInstr &MI0, + const MachineInstr &MI1, + const MachineRegisterInfo *MRI = nullptr) const; + + /// \returns true if a branch from an instruction with opcode \p BranchOpc + /// bytes is capable of jumping to a position \p BrOffset bytes away. + virtual bool isBranchOffsetInRange(unsigned BranchOpc, + int64_t BrOffset) const { + llvm_unreachable("target did not implement"); + } + + /// \returns The block that branch instruction \p MI jumps to. + virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const { + llvm_unreachable("target did not implement"); + } + + /// Insert an unconditional indirect branch at the end of \p MBB to \p + /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to + /// the offset of the position to insert the new branch. + /// + /// \returns The number of bytes added to the block. + virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &NewDestBB, + const DebugLoc &DL, + int64_t BrOffset = 0, + RegScavenger *RS = nullptr) const { + llvm_unreachable("target did not implement"); + } + + /// Analyze the branching code at the end of MBB, returning + /// true if it cannot be understood (e.g. it's a switch dispatch or isn't + /// implemented for a target). Upon success, this returns false and returns + /// with the following information in various cases: + /// + /// 1. If this block ends with no branches (it just falls through to its succ) + /// just return false, leaving TBB/FBB null. + /// 2. If this block ends with only an unconditional branch, it sets TBB to be + /// the destination block. + /// 3. If this block ends with a conditional branch and it falls through to a + /// successor block, it sets TBB to be the branch destination block and a + /// list of operands that evaluate the condition. These operands can be + /// passed to other TargetInstrInfo methods to create new branches. + /// 4. If this block ends with a conditional branch followed by an + /// unconditional branch, it returns the 'true' destination in TBB, the + /// 'false' destination in FBB, and a list of operands that evaluate the + /// condition. These operands can be passed to other TargetInstrInfo + /// methods to create new branches. + /// + /// Note that removeBranch and insertBranch must be implemented to support + /// cases where this method returns success. + /// + /// If AllowModify is true, then this routine is allowed to modify the basic + /// block (e.g. delete instructions after the unconditional branch). + /// + /// The CFG information in MBB.Predecessors and MBB.Successors must be valid + /// before calling this function. + virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify = false) const { + return true; + } + + /// Represents a predicate at the MachineFunction level. 
The control flow a + /// MachineBranchPredicate represents is: + /// + /// Reg = LHS `Predicate` RHS == ConditionDef + /// if Reg then goto TrueDest else goto FalseDest + /// + struct MachineBranchPredicate { + enum ComparePredicate { + PRED_EQ, // True if two values are equal + PRED_NE, // True if two values are not equal + PRED_INVALID // Sentinel value + }; + + ComparePredicate Predicate = PRED_INVALID; + MachineOperand LHS = MachineOperand::CreateImm(0); + MachineOperand RHS = MachineOperand::CreateImm(0); + MachineBasicBlock *TrueDest = nullptr; + MachineBasicBlock *FalseDest = nullptr; + MachineInstr *ConditionDef = nullptr; + + /// SingleUseCondition is true if ConditionDef is dead except for the + /// branch(es) at the end of the basic block. + /// + bool SingleUseCondition = false; + + explicit MachineBranchPredicate() = default; + }; + + /// Analyze the branching code at the end of MBB and parse it into the + /// MachineBranchPredicate structure if possible. Returns false on success + /// and true on failure. + /// + /// If AllowModify is true, then this routine is allowed to modify the basic + /// block (e.g. delete instructions after the unconditional branch). + /// + virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, + MachineBranchPredicate &MBP, + bool AllowModify = false) const { + return true; + } + + /// Remove the branching code at the end of the specific MBB. + /// This is only invoked in cases where AnalyzeBranch returns success. It + /// returns the number of instructions that were removed. + /// If \p BytesRemoved is non-null, report the change in code size from the + /// removed instructions. + virtual unsigned removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved = nullptr) const { + llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!"); + } + + /// Insert branch code into the end of the specified MachineBasicBlock. The + /// operands to this method are the same as those returned by AnalyzeBranch. + /// This is only invoked in cases where AnalyzeBranch returns success. It + /// returns the number of instructions inserted. If \p BytesAdded is non-null, + /// report the change in code size from the added instructions. + /// + /// It is also invoked by tail merging to add unconditional branches in + /// cases where AnalyzeBranch doesn't apply because there was no original + /// branch to analyze. At least this much must be implemented, else tail + /// merging needs to be disabled. + /// + /// The CFG information in MBB.Predecessors and MBB.Successors must be valid + /// before calling this function. + virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded = nullptr) const { + llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!"); + } + + unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, + MachineBasicBlock *DestBB, + const DebugLoc &DL, + int *BytesAdded = nullptr) const { + return insertBranch(MBB, DestBB, nullptr, ArrayRef(), DL, + BytesAdded); + } + + /// Analyze the loop code, return true if it cannot be understoo. Upon + /// success, this function returns false and returns information about the + /// induction variable and compare instruction used at the end. + virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, + MachineInstr *&CmpInst) const { + return true; + } + + /// Generate code to reduce the loop iteration by one and check if the loop is + /// finished. 
Return the value/register of the the new loop count. We need + /// this function when peeling off one or more iterations of a loop. This + /// function assumes the nth iteration is peeled first. + virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar, + MachineInstr &Cmp, + SmallVectorImpl &Cond, + SmallVectorImpl &PrevInsts, + unsigned Iter, unsigned MaxIter) const { + llvm_unreachable("Target didn't implement ReduceLoopCount"); + } + + /// Delete the instruction OldInst and everything after it, replacing it with + /// an unconditional branch to NewDest. This is used by the tail merging pass. + virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, + MachineBasicBlock *NewDest) const; + + /// Return true if it's legal to split the given basic + /// block at the specified instruction (i.e. instruction would be the start + /// of a new basic block). + virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) const { + return true; + } + + /// Return true if it's profitable to predicate + /// instructions with accumulated instruction latency of "NumCycles" + /// of the specified basic block, where the probability of the instructions + /// being executed is given by Probability, and Confidence is a measure + /// of our confidence that it will be properly predicted. + virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, + unsigned ExtraPredCycles, + BranchProbability Probability) const { + return false; + } + + /// Second variant of isProfitableToIfCvt. This one + /// checks for the case where two basic blocks from true and false path + /// of a if-then-else (diamond) are predicated on mutally exclusive + /// predicates, where the probability of the true path being taken is given + /// by Probability, and Confidence is a measure of our confidence that it + /// will be properly predicted. + virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, + unsigned ExtraTCycles, + MachineBasicBlock &FMBB, unsigned NumFCycles, + unsigned ExtraFCycles, + BranchProbability Probability) const { + return false; + } + + /// Return true if it's profitable for if-converter to duplicate instructions + /// of specified accumulated instruction latencies in the specified MBB to + /// enable if-conversion. + /// The probability of the instructions being executed is given by + /// Probability, and Confidence is a measure of our confidence that it + /// will be properly predicted. + virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, + unsigned NumCycles, + BranchProbability Probability) const { + return false; + } + + /// Return true if it's profitable to unpredicate + /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually + /// exclusive predicates. + /// e.g. + /// subeq r0, r1, #1 + /// addne r0, r1, #1 + /// => + /// sub r0, r1, #1 + /// addne r0, r1, #1 + /// + /// This may be profitable is conditional instructions are always executed. + virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, + MachineBasicBlock &FMBB) const { + return false; + } + + /// Return true if it is possible to insert a select + /// instruction that chooses between TrueReg and FalseReg based on the + /// condition code in Cond. + /// + /// When successful, also return the latency in cycles from TrueReg, + /// FalseReg, and Cond to the destination register. 
In most cases, a select + /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1 + /// + /// Some x86 implementations have 2-cycle cmov instructions. + /// + /// @param MBB Block where select instruction would be inserted. + /// @param Cond Condition returned by AnalyzeBranch. + /// @param TrueReg Virtual register to select when Cond is true. + /// @param FalseReg Virtual register to select when Cond is false. + /// @param CondCycles Latency from Cond+Branch to select output. + /// @param TrueCycles Latency from TrueReg to select output. + /// @param FalseCycles Latency from FalseReg to select output. + virtual bool canInsertSelect(const MachineBasicBlock &MBB, + ArrayRef Cond, unsigned TrueReg, + unsigned FalseReg, int &CondCycles, + int &TrueCycles, int &FalseCycles) const { + return false; + } + + /// Insert a select instruction into MBB before I that will copy TrueReg to + /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false. + /// + /// This function can only be called after canInsertSelect() returned true. + /// The condition in Cond comes from AnalyzeBranch, and it can be assumed + /// that the same flags or registers required by Cond are available at the + /// insertion point. + /// + /// @param MBB Block where select instruction should be inserted. + /// @param I Insertion point. + /// @param DL Source location for debugging. + /// @param DstReg Virtual register to be defined by select instruction. + /// @param Cond Condition as computed by AnalyzeBranch. + /// @param TrueReg Virtual register to copy when Cond is true. + /// @param FalseReg Virtual register to copy when Cons is false. + virtual void insertSelect(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, const DebugLoc &DL, + unsigned DstReg, ArrayRef Cond, + unsigned TrueReg, unsigned FalseReg) const { + llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!"); + } + + /// Analyze the given select instruction, returning true if + /// it cannot be understood. It is assumed that MI->isSelect() is true. + /// + /// When successful, return the controlling condition and the operands that + /// determine the true and false result values. + /// + /// Result = SELECT Cond, TrueOp, FalseOp + /// + /// Some targets can optimize select instructions, for example by predicating + /// the instruction defining one of the operands. Such targets should set + /// Optimizable. + /// + /// @param MI Select instruction to analyze. + /// @param Cond Condition controlling the select. + /// @param TrueOp Operand number of the value selected when Cond is true. + /// @param FalseOp Operand number of the value selected when Cond is false. + /// @param Optimizable Returned as true if MI is optimizable. + /// @returns False on success. + virtual bool analyzeSelect(const MachineInstr &MI, + SmallVectorImpl &Cond, + unsigned &TrueOp, unsigned &FalseOp, + bool &Optimizable) const { + assert(MI.getDesc().isSelect() && "MI must be a select instruction"); + return true; + } + + /// Given a select instruction that was understood by + /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by + /// merging it with one of its operands. Returns NULL on failure. + /// + /// When successful, returns the new select instruction. The client is + /// responsible for deleting MI. + /// + /// If both sides of the select can be optimized, PreferFalse is used to pick + /// a side. + /// + /// @param MI Optimizable select instruction. 
+ /// @param NewMIs Set that record all MIs in the basic block up to \p + /// MI. Has to be updated with any newly created MI or deleted ones. + /// @param PreferFalse Try to optimize FalseOp instead of TrueOp. + /// @returns Optimized instruction or NULL. + virtual MachineInstr *optimizeSelect(MachineInstr &MI, + SmallPtrSetImpl &NewMIs, + bool PreferFalse = false) const { + // This function must be implemented if Optimizable is ever set. + llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!"); + } + + /// Emit instructions to copy a pair of physical registers. + /// + /// This function should support copies within any legal register class as + /// well as any cross-class copies created during instruction selection. + /// + /// The source and destination registers may overlap, which may require a + /// careful implementation when multiple copy instructions are required for + /// large registers. See for example the ARM target. + virtual void copyPhysReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, const DebugLoc &DL, + unsigned DestReg, unsigned SrcReg, + bool KillSrc) const { + llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); + } + + /// Store the specified register of the given register class to the specified + /// stack frame index. The store instruction is to be added to the given + /// machine basic block before the specified machine instruction. If isKill + /// is true, the register operand is the last use and must be marked kill. + virtual void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned SrcReg, bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + llvm_unreachable("Target didn't implement " + "TargetInstrInfo::storeRegToStackSlot!"); + } + + /// Load the specified register of the given register class from the specified + /// stack frame index. The load instruction is to be added to the given + /// machine basic block before the specified machine instruction. + virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + unsigned DestReg, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + llvm_unreachable("Target didn't implement " + "TargetInstrInfo::loadRegFromStackSlot!"); + } + + /// This function is called for all pseudo instructions + /// that remain after register allocation. Many pseudo instructions are + /// created to help register allocation. This is the place to convert them + /// into real instructions. The target can edit MI in place, or it can insert + /// new instructions and erase MI. The function should return true if + /// anything was changed. + virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; } + + /// Check whether the target can fold a load that feeds a subreg operand + /// (or a subreg operand that feeds a store). + /// For example, X86 may want to return true if it can fold + /// movl (%esp), %eax + /// subb, %al, ... + /// Into: + /// subb (%esp), ... + /// + /// Ideally, we'd like the target implementation of foldMemoryOperand() to + /// reject subregs - but since this behavior used to be enforced in the + /// target-independent code, moving this responsibility to the targets + /// has the potential of causing nasty silent breakage in out-of-tree targets. 
+ virtual bool isSubregFoldable() const { return false; } + + /// Attempt to fold a load or store of the specified stack + /// slot into the specified machine instruction for the specified operand(s). + /// If this is possible, a new instruction is returned with the specified + /// operand folded, otherwise NULL is returned. + /// The new instruction is inserted before MI, and the client is responsible + /// for removing the old instruction. + MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef Ops, + int FrameIndex, + LiveIntervals *LIS = nullptr) const; + + /// Same as the previous version except it allows folding of any load and + /// store from / to any address, not just from a specific stack slot. + MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef Ops, + MachineInstr &LoadMI, + LiveIntervals *LIS = nullptr) const; + + /// Return true when there is potentially a faster code sequence + /// for an instruction chain ending in \p Root. All potential patterns are + /// returned in the \p Pattern vector. Pattern should be sorted in priority + /// order since the pattern evaluator stops checking as soon as it finds a + /// faster sequence. + /// \param Root - Instruction that could be combined with one of its operands + /// \param Patterns - Vector of possible combination patterns + virtual bool getMachineCombinerPatterns( + MachineInstr &Root, + SmallVectorImpl &Patterns) const; + + /// Return true when a code sequence can improve throughput. It + /// should be called only for instructions in loops. + /// \param Pattern - combiner pattern + virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const; + + /// Return true if the input \P Inst is part of a chain of dependent ops + /// that are suitable for reassociation, otherwise return false. + /// If the instruction's operands must be commuted to have a previous + /// instruction of the same type define the first source operand, \P Commuted + /// will be set to true. + bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const; + + /// Return true when \P Inst is both associative and commutative. + virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const { + return false; + } + + /// Return true when \P Inst has reassociable operands in the same \P MBB. + virtual bool hasReassociableOperands(const MachineInstr &Inst, + const MachineBasicBlock *MBB) const; + + /// Return true when \P Inst has reassociable sibling. + bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const; + + /// When getMachineCombinerPatterns() finds patterns, this function generates + /// the instructions that could replace the original code sequence. The client + /// has to decide whether the actual replacement is beneficial or not. + /// \param Root - Instruction that could be combined with one of its operands + /// \param Pattern - Combination pattern for Root + /// \param InsInstrs - Vector of new instructions that implement P + /// \param DelInstrs - Old instructions, including Root, that could be + /// replaced by InsInstr + /// \param InstrIdxForVirtReg - map of virtual register to instruction in + /// InsInstr that defines it + virtual void genAlternativeCodeSequence( + MachineInstr &Root, MachineCombinerPattern Pattern, + SmallVectorImpl &InsInstrs, + SmallVectorImpl &DelInstrs, + DenseMap &InstrIdxForVirtReg) const; + + /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to + /// reduce critical path length. 
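For the machine-combiner reassociation machinery above, a target usually only needs to say which opcodes are associative and commutative; the generic hasReassociableOperands/reassociateOps code does the rest. A minimal sketch with hypothetical integer opcodes:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

namespace MyTarget { enum { ADDrr = 1, MULrr = 2 }; } // hypothetical opcodes

struct MyTargetInstrInfo : TargetInstrInfo {
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override {
    switch (Inst.getOpcode()) {
    case MyTarget::ADDrr: // Integer add and multiply reassociate freely.
    case MyTarget::MULrr:
      return true;
    default:
      return false;
    }
  }
};

Floating-point opcodes would typically be reported here only when the relevant fast-math flags permit reassociation.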
+ void reassociateOps(MachineInstr &Root, MachineInstr &Prev, + MachineCombinerPattern Pattern, + SmallVectorImpl &InsInstrs, + SmallVectorImpl &DelInstrs, + DenseMap &InstrIdxForVirtReg) const; + + /// This is an architecture-specific helper function of reassociateOps. + /// Set special operand attributes for new instructions after reassociation. + virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, + MachineInstr &NewMI1, + MachineInstr &NewMI2) const {} + + /// Return true when a target supports MachineCombiner. + virtual bool useMachineCombiner() const { return false; } + +protected: + /// Target-dependent implementation for foldMemoryOperand. + /// Target-independent code in foldMemoryOperand will + /// take care of adding a MachineMemOperand to the newly created instruction. + /// The instruction and any auxiliary instructions necessary will be inserted + /// at InsertPt. + virtual MachineInstr * + foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, + ArrayRef Ops, + MachineBasicBlock::iterator InsertPt, int FrameIndex, + LiveIntervals *LIS = nullptr) const { + return nullptr; + } + + /// Target-dependent implementation for foldMemoryOperand. + /// Target-independent code in foldMemoryOperand will + /// take care of adding a MachineMemOperand to the newly created instruction. + /// The instruction and any auxiliary instructions necessary will be inserted + /// at InsertPt. + virtual MachineInstr *foldMemoryOperandImpl( + MachineFunction &MF, MachineInstr &MI, ArrayRef Ops, + MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, + LiveIntervals *LIS = nullptr) const { + return nullptr; + } + + /// \brief Target-dependent implementation of getRegSequenceInputs. + /// + /// \returns true if it is possible to build the equivalent + /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise. + /// + /// \pre MI.isRegSequenceLike(). + /// + /// \see TargetInstrInfo::getRegSequenceInputs. + virtual bool getRegSequenceLikeInputs( + const MachineInstr &MI, unsigned DefIdx, + SmallVectorImpl &InputRegs) const { + return false; + } + + /// \brief Target-dependent implementation of getExtractSubregInputs. + /// + /// \returns true if it is possible to build the equivalent + /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. + /// + /// \pre MI.isExtractSubregLike(). + /// + /// \see TargetInstrInfo::getExtractSubregInputs. + virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, + unsigned DefIdx, + RegSubRegPairAndIdx &InputReg) const { + return false; + } + + /// \brief Target-dependent implementation of getInsertSubregInputs. + /// + /// \returns true if it is possible to build the equivalent + /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. + /// + /// \pre MI.isInsertSubregLike(). + /// + /// \see TargetInstrInfo::getInsertSubregInputs. + virtual bool + getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, + RegSubRegPair &BaseReg, + RegSubRegPairAndIdx &InsertedReg) const { + return false; + } + +public: + /// getAddressSpaceForPseudoSourceKind - Given the kind of memory + /// (e.g. stack) the target returns the corresponding address space. + virtual unsigned + getAddressSpaceForPseudoSourceKind(PseudoSourceValue::PSVKind Kind) const { + return 0; + } + + /// unfoldMemoryOperand - Separate a single instruction which folded a load or + /// a store or a load and a store into two or more instruction. 
If this is + /// possible, returns true as well as the new instructions by reference. + virtual bool + unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, + bool UnfoldLoad, bool UnfoldStore, + SmallVectorImpl &NewMIs) const { + return false; + } + + virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, + SmallVectorImpl &NewNodes) const { + return false; + } + + /// Returns the opcode of the would be new + /// instruction after load / store are unfolded from an instruction of the + /// specified opcode. It returns zero if the specified unfolding is not + /// possible. If LoadRegIndex is non-null, it is filled in with the operand + /// index of the operand which will hold the register holding the loaded + /// value. + virtual unsigned + getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, + unsigned *LoadRegIndex = nullptr) const { + return 0; + } + + /// This is used by the pre-regalloc scheduler to determine if two loads are + /// loading from the same base address. It should only return true if the base + /// pointers are the same and the only differences between the two addresses + /// are the offset. It also returns the offsets by reference. + virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, + int64_t &Offset1, + int64_t &Offset2) const { + return false; + } + + /// This is a used by the pre-regalloc scheduler to determine (in conjunction + /// with areLoadsFromSameBasePtr) if two loads should be scheduled together. + /// On some targets if two loads are loading from + /// addresses in the same cache line, it's better if they are scheduled + /// together. This function takes two integers that represent the load offsets + /// from the common base address. It returns true if it decides it's desirable + /// to schedule the two loads together. "NumLoads" is the number of loads that + /// have already been scheduled after Load1. + virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, + int64_t Offset1, int64_t Offset2, + unsigned NumLoads) const { + return false; + } + + /// Get the base register and byte offset of an instruction that reads/writes + /// memory. + virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg, + int64_t &Offset, + const TargetRegisterInfo *TRI) const { + return false; + } + + /// Return true if the instruction contains a base register and offset. If + /// true, the function also sets the operand position in the instruction + /// for the base register and offset. + virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, + unsigned &BasePos, + unsigned &OffsetPos) const { + return false; + } + + /// If the instruction is an increment of a constant value, return the amount. + virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const { + return false; + } + + /// Returns true if the two given memory operations should be scheduled + /// adjacent. Note that you have to add: + /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); + /// or + /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); + /// to TargetPassConfig::createMachineScheduler() to have an effect. 
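The mutation registration that the comment above asks for might look like the following sketch; createGenericSchedLive and the two cluster mutations are existing MachineScheduler helpers, while the function name is illustrative and would be returned from a target's TargetPassConfig::createMachineScheduler().

#include "llvm/CodeGen/MachineScheduler.h"

using namespace llvm;

// Returned from a target's TargetPassConfig::createMachineScheduler() so that
// shouldClusterMemOps() (declared just below) actually gets consulted.
static ScheduleDAGInstrs *
createMyTargetMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}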
+ virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1, + MachineInstr &SecondLdSt, unsigned BaseReg2, + unsigned NumLoads) const { + llvm_unreachable("target did not implement shouldClusterMemOps()"); + } + + /// Reverses the branch condition of the specified condition list, + /// returning false on success and true if it cannot be reversed. + virtual bool + reverseBranchCondition(SmallVectorImpl &Cond) const { + return true; + } + + /// Insert a noop into the instruction stream at the specified point. + virtual void insertNoop(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI) const; + + /// Return the noop instruction to use for a noop. + virtual void getNoop(MCInst &NopInst) const; + + /// Return true for post-incremented instructions. + virtual bool isPostIncrement(const MachineInstr &MI) const { return false; } + + /// Returns true if the instruction is already predicated. + virtual bool isPredicated(const MachineInstr &MI) const { return false; } + + /// Returns true if the instruction is a + /// terminator instruction that has not been predicated. + virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const; + + /// Returns true if MI is an unconditional tail call. + virtual bool isUnconditionalTailCall(const MachineInstr &MI) const { + return false; + } + + /// Returns true if the tail call can be made conditional on BranchCond. + virtual bool canMakeTailCallConditional(SmallVectorImpl &Cond, + const MachineInstr &TailCall) const { + return false; + } + + /// Replace the conditional branch in MBB with a conditional tail call. + virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, + SmallVectorImpl &Cond, + const MachineInstr &TailCall) const { + llvm_unreachable("Target didn't implement replaceBranchWithTailCall!"); + } + + /// Convert the instruction into a predicated instruction. + /// It returns true if the operation was successful. + virtual bool PredicateInstruction(MachineInstr &MI, + ArrayRef Pred) const; + + /// Returns true if the first specified predicate + /// subsumes the second, e.g. GE subsumes GT. + virtual bool SubsumesPredicate(ArrayRef Pred1, + ArrayRef Pred2) const { + return false; + } + + /// If the specified instruction defines any predicate + /// or condition code register(s) used for predication, returns true as well + /// as the definition predicate(s) by reference. + virtual bool DefinesPredicate(MachineInstr &MI, + std::vector &Pred) const { + return false; + } + + /// Return true if the specified instruction can be predicated. + /// By default, this returns true for every instruction with a + /// PredicateOperand. + virtual bool isPredicable(const MachineInstr &MI) const { + return MI.getDesc().isPredicable(); + } + + /// Return true if it's safe to move a machine + /// instruction that defines the specified register class. + virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { + return true; + } + + /// Test if the given instruction should be considered a scheduling boundary. + /// This primarily includes labels and terminators. + virtual bool isSchedulingBoundary(const MachineInstr &MI, + const MachineBasicBlock *MBB, + const MachineFunction &MF) const; + + /// Measure the specified inline asm to determine an approximation of its + /// length. + virtual unsigned getInlineAsmLength(const char *Str, + const MCAsmInfo &MAI) const; + + /// Allocate and return a hazard recognizer to use for this target when + /// scheduling the machine instructions before register allocation. 
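A sketch of reverseBranchCondition() for a hypothetical target whose analyzeBranch() produces a single condition-code immediate; the encoding where flipping the low bit inverts the condition (EQ<->NE, LT<->GE, ...) is an assumption of the example.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

struct MyTargetInstrInfo : TargetInstrInfo {
  bool reverseBranchCondition(
      SmallVectorImpl<MachineOperand> &Cond) const override {
    if (Cond.size() != 1 || !Cond[0].isImm())
      return true;                        // Cannot reverse this condition.
    Cond[0].setImm(Cond[0].getImm() ^ 1); // Assumed encoding: low bit flips the sense.
    return false;                         // Successfully reversed.
  }
};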
+ virtual ScheduleHazardRecognizer * + CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, + const ScheduleDAG *DAG) const; + + /// Allocate and return a hazard recognizer to use for this target when + /// scheduling the machine instructions before register allocation. + virtual ScheduleHazardRecognizer * + CreateTargetMIHazardRecognizer(const InstrItineraryData *, + const ScheduleDAG *DAG) const; + + /// Allocate and return a hazard recognizer to use for this target when + /// scheduling the machine instructions after register allocation. + virtual ScheduleHazardRecognizer * + CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, + const ScheduleDAG *DAG) const; + + /// Allocate and return a hazard recognizer to use for by non-scheduling + /// passes. + virtual ScheduleHazardRecognizer * + CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { + return nullptr; + } + + /// Provide a global flag for disabling the PreRA hazard recognizer that + /// targets may choose to honor. + bool usePreRAHazardRecognizer() const; + + /// For a comparison instruction, return the source registers + /// in SrcReg and SrcReg2 if having two register operands, and the value it + /// compares against in CmpValue. Return true if the comparison instruction + /// can be analyzed. + virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, + unsigned &SrcReg2, int &Mask, int &Value) const { + return false; + } + + /// See if the comparison instruction can be converted + /// into something more efficient. E.g., on ARM most instructions can set the + /// flags register, obviating the need for a separate CMP. + virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, + unsigned SrcReg2, int Mask, int Value, + const MachineRegisterInfo *MRI) const { + return false; + } + virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; } + + /// Try to remove the load by folding it to a register operand at the use. + /// We fold the load instructions if and only if the + /// def and use are in the same BB. We only look at one load and see + /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register + /// defined by the load we are trying to fold. DefMI returns the machine + /// instruction that defines FoldAsLoadDefReg, and the function returns + /// the machine instruction generated due to folding. + virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI, + const MachineRegisterInfo *MRI, + unsigned &FoldAsLoadDefReg, + MachineInstr *&DefMI) const { + return nullptr; + } + + /// 'Reg' is known to be defined by a move immediate instruction, + /// try to fold the immediate into the use instruction. + /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true, + /// then the caller may assume that DefMI has been erased from its parent + /// block. The caller may assume that it will not be erased by this + /// function otherwise. + virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, + unsigned Reg, MachineRegisterInfo *MRI) const { + return false; + } + + /// Return the number of u-operations the given machine + /// instruction will be decoded to on the target cpu. The itinerary's + /// IssueWidth is the number of microops that can be dispatched each + /// cycle. An instruction with zero microops takes no dispatch resources. 
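A sketch of analyzeCompare() for a hypothetical target with register-register and register-immediate compares; the opcodes and the convention of returning Mask = Value = 0 for a two-register compare follow the pattern used by in-tree targets but are assumptions here.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

namespace MyTarget { enum { CMPrr = 1, CMPri = 2 }; } // hypothetical opcodes

struct MyTargetInstrInfo : TargetInstrInfo {
  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &Mask, int &Value) const override {
    switch (MI.getOpcode()) {
    case MyTarget::CMPrr:                        // cmp reg, reg
      SrcReg = MI.getOperand(0).getReg();
      SrcReg2 = MI.getOperand(1).getReg();
      Mask = Value = 0;
      return true;
    case MyTarget::CMPri:                        // cmp reg, imm
      SrcReg = MI.getOperand(0).getReg();
      SrcReg2 = 0;
      Mask = ~0;
      Value = (int)MI.getOperand(1).getImm();
      return true;
    default:
      return false;
    }
  }
};

optimizeCompareInstr() would then try to prove that an earlier flag-setting instruction already produces the needed condition and delete the compare.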
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, + const MachineInstr &MI) const; + + /// Return true for pseudo instructions that don't consume any + /// machine resources in their current form. These are common cases that the + /// scheduler should consider free, rather than conservatively handling them + /// as instructions with no itinerary. + bool isZeroCost(unsigned Opcode) const { + return Opcode <= TargetOpcode::COPY; + } + + virtual int getOperandLatency(const InstrItineraryData *ItinData, + SDNode *DefNode, unsigned DefIdx, + SDNode *UseNode, unsigned UseIdx) const; + + /// Compute and return the use operand latency of a given pair of def and use. + /// In most cases, the static scheduling itinerary was enough to determine the + /// operand latency. But it may not be possible for instructions with variable + /// number of defs / uses. + /// + /// This is a raw interface to the itinerary that may be directly overridden + /// by a target. Use computeOperandLatency to get the best estimate of + /// latency. + virtual int getOperandLatency(const InstrItineraryData *ItinData, + const MachineInstr &DefMI, unsigned DefIdx, + const MachineInstr &UseMI, + unsigned UseIdx) const; + + /// Compute the instruction latency of a given instruction. + /// If the instruction has higher cost when predicated, it's returned via + /// PredCost. + virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, + const MachineInstr &MI, + unsigned *PredCost = nullptr) const; + + virtual unsigned getPredicationCost(const MachineInstr &MI) const; + + virtual int getInstrLatency(const InstrItineraryData *ItinData, + SDNode *Node) const; + + /// Return the default expected latency for a def based on its opcode. + unsigned defaultDefLatency(const MCSchedModel &SchedModel, + const MachineInstr &DefMI) const; + + int computeDefOperandLatency(const InstrItineraryData *ItinData, + const MachineInstr &DefMI) const; + + /// Return true if this opcode has high latency to its result. + virtual bool isHighLatencyDef(int opc) const { return false; } + + /// Compute operand latency between a def of 'Reg' + /// and a use in the current loop. Return true if the target considered + /// it 'high'. This is used by optimization passes such as machine LICM to + /// determine whether it makes sense to hoist an instruction out even in a + /// high register pressure situation. + virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, + const MachineRegisterInfo *MRI, + const MachineInstr &DefMI, unsigned DefIdx, + const MachineInstr &UseMI, + unsigned UseIdx) const { + return false; + } + + /// Compute operand latency of a def of 'Reg'. Return true + /// if the target considered it 'low'. + virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, + const MachineInstr &DefMI, + unsigned DefIdx) const; + + /// Perform target-specific instruction verification. + virtual bool verifyInstruction(const MachineInstr &MI, + StringRef &ErrInfo) const { + return true; + } + + /// Return the current execution domain and bit mask of + /// possible domains for instruction. + /// + /// Some micro-architectures have multiple execution domains, and multiple + /// opcodes that perform the same operation in different domains. For + /// example, the x86 architecture provides the por, orps, and orpd + /// instructions that all do the same thing. There is a latency penalty if a + /// register is written in one domain and read in another. 
+ /// + /// This function returns a pair (domain, mask) containing the execution + /// domain of MI, and a bit mask of possible domains. The setExecutionDomain + /// function can be used to change the opcode to one of the domains in the + /// bit mask. Instructions whose execution domain can't be changed should + /// return a 0 mask. + /// + /// The execution domain numbers don't have any special meaning except domain + /// 0 is used for instructions that are not associated with any interesting + /// execution domain. + /// + virtual std::pair + getExecutionDomain(const MachineInstr &MI) const { + return std::make_pair(0, 0); + } + + /// Change the opcode of MI to execute in Domain. + /// + /// The bit (1 << Domain) must be set in the mask returned from + /// getExecutionDomain(MI). + virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {} + + /// Returns the preferred minimum clearance + /// before an instruction with an unwanted partial register update. + /// + /// Some instructions only write part of a register, and implicitly need to + /// read the other parts of the register. This may cause unwanted stalls + /// preventing otherwise unrelated instructions from executing in parallel in + /// an out-of-order CPU. + /// + /// For example, the x86 instruction cvtsi2ss writes its result to bits + /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so + /// the instruction needs to wait for the old value of the register to become + /// available: + /// + /// addps %xmm1, %xmm0 + /// movaps %xmm0, (%rax) + /// cvtsi2ss %rbx, %xmm0 + /// + /// In the code above, the cvtsi2ss instruction needs to wait for the addps + /// instruction before it can issue, even though the high bits of %xmm0 + /// probably aren't needed. + /// + /// This hook returns the preferred clearance before MI, measured in + /// instructions. Other defs of MI's operand OpNum are avoided in the last N + /// instructions before MI. It should only return a positive value for + /// unwanted dependencies. If the old bits of the defined register have + /// useful values, or if MI is determined to otherwise read the dependency, + /// the hook should return 0. + /// + /// The unwanted dependency may be handled by: + /// + /// 1. Allocating the same register for an MI def and use. That makes the + /// unwanted dependency identical to a required dependency. + /// + /// 2. Allocating a register for the def that has no defs in the previous N + /// instructions. + /// + /// 3. Calling breakPartialRegDependency() with the same arguments. This + /// allows the target to insert a dependency breaking instruction. + /// + virtual unsigned + getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, + const TargetRegisterInfo *TRI) const { + // The default implementation returns 0 for no partial register dependency. + return 0; + } + + /// \brief Return the minimum clearance before an instruction that reads an + /// unused register. + /// + /// For example, AVX instructions may copy part of a register operand into + /// the unused high bits of the destination register. + /// + /// vcvtsi2sdq %rax, %xmm0, %xmm14 + /// + /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a + /// false dependence on any previous write to %xmm0. + /// + /// This hook works similarly to getPartialRegUpdateClearance, except that it + /// does not take an operand index. Instead sets \p OpNum to the index of the + /// unused register. 
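A small sketch of the execution-domain pair of hooks for a hypothetical target with one floating-point and one integer bitwise-OR opcode; domain numbering, mask value and opcodes are assumptions of the example.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <utility>

using namespace llvm;

namespace MyTarget { enum { ORPSrr = 1, PORrr = 2 }; } // hypothetical FP/int ORs

struct MyTargetInstrInfo : TargetInstrInfo {
  // Domain 1 = floating point, domain 2 = integer; mask 0x6 marks both legal.
  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const override {
    switch (MI.getOpcode()) {
    case MyTarget::ORPSrr:
      return std::make_pair(1, 0x6);
    case MyTarget::PORrr:
      return std::make_pair(2, 0x6);
    default:
      return std::make_pair(0, 0); // Not associated with an interesting domain.
    }
  }

  void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override {
    // Swap to the equivalent opcode in the requested domain.
    MI.setDesc(get(Domain == 2 ? MyTarget::PORrr : MyTarget::ORPSrr));
  }
};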
+ virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum, + const TargetRegisterInfo *TRI) const { + // The default implementation returns 0 for no undef register dependency. + return 0; + } + + /// Insert a dependency-breaking instruction + /// before MI to eliminate an unwanted dependency on OpNum. + /// + /// If it wasn't possible to avoid a def in the last N instructions before MI + /// (see getPartialRegUpdateClearance), this hook will be called to break the + /// unwanted dependency. + /// + /// On x86, an xorps instruction can be used as a dependency breaker: + /// + /// addps %xmm1, %xmm0 + /// movaps %xmm0, (%rax) + /// xorps %xmm0, %xmm0 + /// cvtsi2ss %rbx, %xmm0 + /// + /// An operand should be added to MI if an instruction was + /// inserted. This ties the instructions together in the post-ra scheduler. + /// + virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, + const TargetRegisterInfo *TRI) const {} + + /// Create machine specific model for scheduling. + virtual DFAPacketizer * + CreateTargetScheduleState(const TargetSubtargetInfo &) const { + return nullptr; + } + + /// Sometimes, it is possible for the target + /// to tell, even without aliasing information, that two MIs access different + /// memory addresses. This function returns true if two MIs access different + /// memory addresses and false otherwise. + /// + /// Assumes any physical registers used to compute addresses have the same + /// value for both instructions. (This is the most useful assumption for + /// post-RA scheduling.) + /// + /// See also MachineInstr::mayAlias, which is implemented on top of this + /// function. + virtual bool + areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, + AliasAnalysis *AA = nullptr) const { + assert((MIa.mayLoad() || MIa.mayStore()) && + "MIa must load from or modify a memory location"); + assert((MIb.mayLoad() || MIb.mayStore()) && + "MIb must load from or modify a memory location"); + return false; + } + + /// \brief Return the value to use for the MachineCSE's LookAheadLimit, + /// which is a heuristic used for CSE'ing phys reg defs. + virtual unsigned getMachineCSELookAheadLimit() const { + // The default lookahead is small to prevent unprofitable quadratic + // behavior. + return 5; + } + + /// Return an array that contains the ids of the target indices (used for the + /// TargetIndex machine operand) and their names. + /// + /// MIR Serialization is able to serialize only the target indices that are + /// defined by this method. + virtual ArrayRef> + getSerializableTargetIndices() const { + return None; + } + + /// Decompose the machine operand's target flags into two values - the direct + /// target flag value and any of bit flags that are applied. + virtual std::pair + decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const { + return std::make_pair(0u, 0u); + } + + /// Return an array that contains the direct target flag values and their + /// names. + /// + /// MIR Serialization is able to serialize only the target flags that are + /// defined by this method. + virtual ArrayRef> + getSerializableDirectMachineOperandTargetFlags() const { + return None; + } + + /// Return an array that contains the bitmask target flag values and their + /// names. + /// + /// MIR Serialization is able to serialize only the target flags that are + /// defined by this method. 
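For the MIR-serialization hooks discussed above, a target typically returns a static table mapping its operand target flags to names; the flag values and names below are hypothetical.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <utility>

using namespace llvm;

struct MyTargetInstrInfo : TargetInstrInfo {
  // Hypothetical operand flags for the low/high halves of a symbol address.
  enum TOF { MO_LO16 = 1, MO_HI16 = 2 };

  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override {
    static const std::pair<unsigned, const char *> TargetFlags[] = {
        {MO_LO16, "mytarget-lo16"}, {MO_HI16, "mytarget-hi16"}};
    return makeArrayRef(TargetFlags);
  }
};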
+ virtual ArrayRef> + getSerializableBitmaskMachineOperandTargetFlags() const { + return None; + } + + /// Return an array that contains the MMO target flag values and their + /// names. + /// + /// MIR Serialization is able to serialize only the MMO target flags that are + /// defined by this method. + virtual ArrayRef> + getSerializableMachineMemOperandTargetFlags() const { + return None; + } + + /// Determines whether \p Inst is a tail call instruction. Override this + /// method on targets that do not properly set MCID::Return and MCID::Call on + /// tail call instructions." + virtual bool isTailCall(const MachineInstr &Inst) const { + return Inst.isReturn() && Inst.isCall(); + } + + /// True if the instruction is bound to the top of its basic block and no + /// other instructions shall be inserted before it. This can be implemented + /// to prevent register allocator to insert spills before such instructions. + virtual bool isBasicBlockPrologue(const MachineInstr &MI) const { + return false; + } + + /// \brief Describes the number of instructions that it will take to call and + /// construct a frame for a given outlining candidate. + struct MachineOutlinerInfo { + /// Number of instructions to call an outlined function for this candidate. + unsigned CallOverhead; + + /// \brief Number of instructions to construct an outlined function frame + /// for this candidate. + unsigned FrameOverhead; + + /// \brief Represents the specific instructions that must be emitted to + /// construct a call to this candidate. + unsigned CallConstructionID; + + /// \brief Represents the specific instructions that must be emitted to + /// construct a frame for this candidate's outlined function. + unsigned FrameConstructionID; + + MachineOutlinerInfo() {} + MachineOutlinerInfo(unsigned CallOverhead, unsigned FrameOverhead, + unsigned CallConstructionID, + unsigned FrameConstructionID) + : CallOverhead(CallOverhead), FrameOverhead(FrameOverhead), + CallConstructionID(CallConstructionID), + FrameConstructionID(FrameConstructionID) {} + }; + + /// \brief Returns a \p MachineOutlinerInfo struct containing target-specific + /// information for a set of outlining candidates. + virtual MachineOutlinerInfo getOutlininingCandidateInfo( + std::vector< + std::pair> + &RepeatedSequenceLocs) const { + llvm_unreachable( + "Target didn't implement TargetInstrInfo::getOutliningOverhead!"); + } + + /// Represents how an instruction should be mapped by the outliner. + /// \p Legal instructions are those which are safe to outline. + /// \p Illegal instructions are those which cannot be outlined. + /// \p Invisible instructions are instructions which can be outlined, but + /// shouldn't actually impact the outlining result. + enum MachineOutlinerInstrType { Legal, Illegal, Invisible }; + + /// Returns how or if \p MI should be outlined. + virtual MachineOutlinerInstrType getOutliningType(MachineInstr &MI) const { + llvm_unreachable( + "Target didn't implement TargetInstrInfo::getOutliningType!"); + } + + /// Insert a custom epilogue for outlined functions. + /// This may be empty, in which case no epilogue or return statement will be + /// emitted. + virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB, + MachineFunction &MF, + const MachineOutlinerInfo &MInfo) const { + llvm_unreachable( + "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!"); + } + + /// Insert a call to an outlined function into the program. + /// Returns an iterator to the spot where we inserted the call. 
This must be + /// implemented by the target. + virtual MachineBasicBlock::iterator + insertOutlinedCall(Module &M, MachineBasicBlock &MBB, + MachineBasicBlock::iterator &It, MachineFunction &MF, + const MachineOutlinerInfo &MInfo) const { + llvm_unreachable( + "Target didn't implement TargetInstrInfo::insertOutlinedCall!"); + } + + /// Insert a custom prologue for outlined functions. + /// This may be empty, in which case no prologue will be emitted. + virtual void insertOutlinerPrologue(MachineBasicBlock &MBB, + MachineFunction &MF, + const MachineOutlinerInfo &MInfo) const { + llvm_unreachable( + "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!"); + } + + /// Return true if the function can safely be outlined from. + /// A function \p MF is considered safe for outlining if an outlined function + /// produced from instructions in F will produce a program which produces the + /// same output for any set of given inputs. + virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, + bool OutlineFromLinkOnceODRs) const { + llvm_unreachable("Target didn't implement " + "TargetInstrInfo::isFunctionSafeToOutlineFrom!"); + } + +private: + unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode; + unsigned CatchRetOpcode; + unsigned ReturnOpcode; +}; + +/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair. +template <> struct DenseMapInfo { + using RegInfo = DenseMapInfo; + + static inline TargetInstrInfo::RegSubRegPair getEmptyKey() { + return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(), + RegInfo::getEmptyKey()); + } + + static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() { + return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(), + RegInfo::getTombstoneKey()); + } + + /// \brief Reuse getHashValue implementation from + /// std::pair. + static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) { + std::pair PairVal = std::make_pair(Val.Reg, Val.SubReg); + return DenseMapInfo>::getHashValue(PairVal); + } + + static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, + const TargetInstrInfo::RegSubRegPair &RHS) { + return RegInfo::isEqual(LHS.Reg, RHS.Reg) && + RegInfo::isEqual(LHS.SubReg, RHS.SubReg); + } +}; + +} // end namespace llvm + +#endif // LLVM_TARGET_TARGETINSTRINFO_H diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h deleted file mode 100644 index 5d230d820dbf..000000000000 --- a/include/llvm/Target/TargetInstrInfo.h +++ /dev/null @@ -1,1691 +0,0 @@ -//===- llvm/Target/TargetInstrInfo.h - Instruction Info ---------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file describes the target machine instruction set to the code generator. 
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TARGET_TARGETINSTRINFO_H -#define LLVM_TARGET_TARGETINSTRINFO_H - -#include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/DenseMapInfo.h" -#include "llvm/ADT/None.h" -#include "llvm/CodeGen/LiveIntervalAnalysis.h" -#include "llvm/CodeGen/MachineBasicBlock.h" -#include "llvm/CodeGen/MachineCombinerPattern.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstr.h" -#include "llvm/CodeGen/MachineLoopInfo.h" -#include "llvm/CodeGen/MachineOperand.h" -#include "llvm/CodeGen/PseudoSourceValue.h" -#include "llvm/MC/MCInstrInfo.h" -#include "llvm/Support/BranchProbability.h" -#include "llvm/Support/ErrorHandling.h" -#include -#include -#include -#include -#include - -namespace llvm { - -class DFAPacketizer; -class InstrItineraryData; -class LiveVariables; -class MachineMemOperand; -class MachineRegisterInfo; -class MCAsmInfo; -class MCInst; -struct MCSchedModel; -class Module; -class ScheduleDAG; -class ScheduleHazardRecognizer; -class SDNode; -class SelectionDAG; -class RegScavenger; -class TargetRegisterClass; -class TargetRegisterInfo; -class TargetSchedModel; -class TargetSubtargetInfo; - -template class SmallVectorImpl; - -//--------------------------------------------------------------------------- -/// -/// TargetInstrInfo - Interface to description of machine instruction set -/// -class TargetInstrInfo : public MCInstrInfo { -public: - TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u, - unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u) - : CallFrameSetupOpcode(CFSetupOpcode), - CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode), - ReturnOpcode(ReturnOpcode) {} - TargetInstrInfo(const TargetInstrInfo &) = delete; - TargetInstrInfo &operator=(const TargetInstrInfo &) = delete; - virtual ~TargetInstrInfo(); - - static bool isGenericOpcode(unsigned Opc) { - return Opc <= TargetOpcode::GENERIC_OP_END; - } - - /// Given a machine instruction descriptor, returns the register - /// class constraint for OpNum, or NULL. - const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum, - const TargetRegisterInfo *TRI, - const MachineFunction &MF) const; - - /// Return true if the instruction is trivially rematerializable, meaning it - /// has no side effects and requires no operands that aren't always available. - /// This means the only allowed uses are constants and unallocatable physical - /// registers so that the instructions result is independent of the place - /// in the function. - bool isTriviallyReMaterializable(const MachineInstr &MI, - AliasAnalysis *AA = nullptr) const { - return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF || - (MI.getDesc().isRematerializable() && - (isReallyTriviallyReMaterializable(MI, AA) || - isReallyTriviallyReMaterializableGeneric(MI, AA))); - } - -protected: - /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is - /// set, this hook lets the target specify whether the instruction is actually - /// trivially rematerializable, taking into consideration its operands. This - /// predicate must return false if the instruction has any side effects other - /// than producing a value, or if it requres any address registers that are - /// not always available. - /// Requirements must be check as stated in isTriviallyReMaterializable() . 
- virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI, - AliasAnalysis *AA) const { - return false; - } - - /// This method commutes the operands of the given machine instruction MI. - /// The operands to be commuted are specified by their indices OpIdx1 and - /// OpIdx2. - /// - /// If a target has any instructions that are commutable but require - /// converting to different instructions or making non-trivial changes - /// to commute them, this method can be overloaded to do that. - /// The default implementation simply swaps the commutable operands. - /// - /// If NewMI is false, MI is modified in place and returned; otherwise, a - /// new machine instruction is created and returned. - /// - /// Do not call this method for a non-commutable instruction. - /// Even though the instruction is commutable, the method may still - /// fail to commute the operands, null pointer is returned in such cases. - virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI, - unsigned OpIdx1, - unsigned OpIdx2) const; - - /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable - /// operand indices to (ResultIdx1, ResultIdx2). - /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be - /// predefined to some indices or be undefined (designated by the special - /// value 'CommuteAnyOperandIndex'). - /// The predefined result indices cannot be re-defined. - /// The function returns true iff after the result pair redefinition - /// the fixed result pair is equal to or equivalent to the source pair of - /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that - /// the pairs (x,y) and (y,x) are equivalent. - static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, - unsigned CommutableOpIdx1, - unsigned CommutableOpIdx2); - -private: - /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is - /// set and the target hook isReallyTriviallyReMaterializable returns false, - /// this function does target-independent tests to determine if the - /// instruction is really trivially rematerializable. - bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI, - AliasAnalysis *AA) const; - -public: - /// These methods return the opcode of the frame setup/destroy instructions - /// if they exist (-1 otherwise). Some targets use pseudo instructions in - /// order to abstract away the difference between operating with a frame - /// pointer and operating without, through the use of these two instructions. - /// - unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } - unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } - - /// Returns true if the argument is a frame pseudo instruction. - bool isFrameInstr(const MachineInstr &I) const { - return I.getOpcode() == getCallFrameSetupOpcode() || - I.getOpcode() == getCallFrameDestroyOpcode(); - } - - /// Returns true if the argument is a frame setup pseudo instruction. - bool isFrameSetup(const MachineInstr &I) const { - return I.getOpcode() == getCallFrameSetupOpcode(); - } - - /// Returns size of the frame associated with the given frame instruction. - /// For frame setup instruction this is frame that is set up space set up - /// after the instruction. For frame destroy instruction this is the frame - /// freed by the caller. - /// Note, in some cases a call frame (or a part of it) may be prepared prior - /// to the frame setup instruction. 
It occurs in the calls that involve - /// inalloca arguments. This function reports only the size of the frame part - /// that is set up between the frame setup and destroy pseudo instructions. - int64_t getFrameSize(const MachineInstr &I) const { - assert(isFrameInstr(I) && "Not a frame instruction"); - assert(I.getOperand(0).getImm() >= 0); - return I.getOperand(0).getImm(); - } - - /// Returns the total frame size, which is made up of the space set up inside - /// the pair of frame start-stop instructions and the space that is set up - /// prior to the pair. - int64_t getFrameTotalSize(const MachineInstr &I) const { - if (isFrameSetup(I)) { - assert(I.getOperand(1).getImm() >= 0 && - "Frame size must not be negative"); - return getFrameSize(I) + I.getOperand(1).getImm(); - } - return getFrameSize(I); - } - - unsigned getCatchReturnOpcode() const { return CatchRetOpcode; } - unsigned getReturnOpcode() const { return ReturnOpcode; } - - /// Returns the actual stack pointer adjustment made by an instruction - /// as part of a call sequence. By default, only call frame setup/destroy - /// instructions adjust the stack, but targets may want to override this - /// to enable more fine-grained adjustment, or adjust by a different value. - virtual int getSPAdjust(const MachineInstr &MI) const; - - /// Return true if the instruction is a "coalescable" extension instruction. - /// That is, it's like a copy where it's legal for the source to overlap the - /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's - /// expected the pre-extension value is available as a subreg of the result - /// register. This also returns the sub-register index in SubIdx. - virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, - unsigned &DstReg, unsigned &SubIdx) const { - return false; - } - - /// If the specified machine instruction is a direct - /// load from a stack slot, return the virtual or physical register number of - /// the destination along with the FrameIndex of the loaded stack slot. If - /// not, return 0. This predicate must return 0 if the instruction has - /// any side effects other than loading from the stack slot. - virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, - int &FrameIndex) const { - return 0; - } - - /// Check for post-frame ptr elimination stack locations as well. - /// This uses a heuristic so it isn't reliable for correctness. - virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, - int &FrameIndex) const { - return 0; - } - - /// If the specified machine instruction has a load from a stack slot, - /// return true along with the FrameIndex of the loaded stack slot and the - /// machine mem operand containing the reference. - /// If not, return false. Unlike isLoadFromStackSlot, this returns true for - /// any instructions that loads from the stack. This is just a hint, as some - /// cases may be missed. - virtual bool hasLoadFromStackSlot(const MachineInstr &MI, - const MachineMemOperand *&MMO, - int &FrameIndex) const; - - /// If the specified machine instruction is a direct - /// store to a stack slot, return the virtual or physical register number of - /// the source reg along with the FrameIndex of the loaded stack slot. If - /// not, return 0. This predicate must return 0 if the instruction has - /// any side effects other than storing to the stack slot. 
- virtual unsigned isStoreToStackSlot(const MachineInstr &MI, - int &FrameIndex) const { - return 0; - } - - /// Check for post-frame ptr elimination stack locations as well. - /// This uses a heuristic, so it isn't reliable for correctness. - virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, - int &FrameIndex) const { - return 0; - } - - /// If the specified machine instruction has a store to a stack slot, - /// return true along with the FrameIndex of the loaded stack slot and the - /// machine mem operand containing the reference. - /// If not, return false. Unlike isStoreToStackSlot, - /// this returns true for any instructions that stores to the - /// stack. This is just a hint, as some cases may be missed. - virtual bool hasStoreToStackSlot(const MachineInstr &MI, - const MachineMemOperand *&MMO, - int &FrameIndex) const; - - /// Return true if the specified machine instruction - /// is a copy of one stack slot to another and has no other effect. - /// Provide the identity of the two frame indices. - virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, - int &SrcFrameIndex) const { - return false; - } - - /// Compute the size in bytes and offset within a stack slot of a spilled - /// register or subregister. - /// - /// \param [out] Size in bytes of the spilled value. - /// \param [out] Offset in bytes within the stack slot. - /// \returns true if both Size and Offset are successfully computed. - /// - /// Not all subregisters have computable spill slots. For example, - /// subregisters registers may not be byte-sized, and a pair of discontiguous - /// subregisters has no single offset. - /// - /// Targets with nontrivial bigendian implementations may need to override - /// this, particularly to support spilled vector registers. - virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, - unsigned &Size, unsigned &Offset, - const MachineFunction &MF) const; - - /// Returns the size in bytes of the specified MachineInstr, or ~0U - /// when this function is not implemented by a target. - virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const { - return ~0U; - } - - /// Return true if the instruction is as cheap as a move instruction. - /// - /// Targets for different archs need to override this, and different - /// micro-architectures can also be finely tuned inside. - virtual bool isAsCheapAsAMove(const MachineInstr &MI) const { - return MI.isAsCheapAsAMove(); - } - - /// Return true if the instruction should be sunk by MachineSink. - /// - /// MachineSink determines on its own whether the instruction is safe to sink; - /// this gives the target a hook to override the default behavior with regards - /// to which instructions should be sunk. - virtual bool shouldSink(const MachineInstr &MI) const { return true; } - - /// Re-issue the specified 'original' instruction at the - /// specific location targeting a new destination register. - /// The register in Orig->getOperand(0).getReg() will be substituted by - /// DestReg:SubIdx. Any existing subreg index is preserved or composed with - /// SubIdx. - virtual void reMaterialize(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, unsigned DestReg, - unsigned SubIdx, const MachineInstr &Orig, - const TargetRegisterInfo &TRI) const; - - /// \brief Clones instruction or the whole instruction bundle \p Orig and - /// insert into \p MBB before \p InsertBefore. The target may update operands - /// that are required to be unique. 
- /// - /// \p Orig must not return true for MachineInstr::isNotDuplicable(). - virtual MachineInstr &duplicate(MachineBasicBlock &MBB, - MachineBasicBlock::iterator InsertBefore, - const MachineInstr &Orig) const; - - /// This method must be implemented by targets that - /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target - /// may be able to convert a two-address instruction into one or more true - /// three-address instructions on demand. This allows the X86 target (for - /// example) to convert ADD and SHL instructions into LEA instructions if they - /// would require register copies due to two-addressness. - /// - /// This method returns a null pointer if the transformation cannot be - /// performed, otherwise it returns the last new instruction. - /// - virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI, - MachineInstr &MI, - LiveVariables *LV) const { - return nullptr; - } - - // This constant can be used as an input value of operand index passed to - // the method findCommutedOpIndices() to tell the method that the - // corresponding operand index is not pre-defined and that the method - // can pick any commutable operand. - static const unsigned CommuteAnyOperandIndex = ~0U; - - /// This method commutes the operands of the given machine instruction MI. - /// - /// The operands to be commuted are specified by their indices OpIdx1 and - /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value - /// 'CommuteAnyOperandIndex', which means that the method is free to choose - /// any arbitrarily chosen commutable operand. If both arguments are set to - /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable - /// operands; then commutes them if such operands could be found. - /// - /// If NewMI is false, MI is modified in place and returned; otherwise, a - /// new machine instruction is created and returned. - /// - /// Do not call this method for a non-commutable instruction or - /// for non-commuable operands. - /// Even though the instruction is commutable, the method may still - /// fail to commute the operands, null pointer is returned in such cases. - MachineInstr * - commuteInstruction(MachineInstr &MI, bool NewMI = false, - unsigned OpIdx1 = CommuteAnyOperandIndex, - unsigned OpIdx2 = CommuteAnyOperandIndex) const; - - /// Returns true iff the routine could find two commutable operands in the - /// given machine instruction. - /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. - /// If any of the INPUT values is set to the special value - /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable - /// operand, then returns its index in the corresponding argument. - /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method - /// looks for 2 commutable operands. - /// If INPUT values refer to some operands of MI, then the method simply - /// returns true if the corresponding operands are commutable and returns - /// false otherwise. - /// - /// For example, calling this method this way: - /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex; - /// findCommutedOpIndices(MI, Op1, Op2); - /// can be interpreted as a query asking to find an operand that would be - /// commutable with the operand#1. - virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, - unsigned &SrcOpIdx2) const; - - /// A pair composed of a register and a sub-register index. - /// Used to give some type checking when modeling Reg:SubReg. 
- struct RegSubRegPair { - unsigned Reg; - unsigned SubReg; - - RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0) - : Reg(Reg), SubReg(SubReg) {} - }; - - /// A pair composed of a pair of a register and a sub-register index, - /// and another sub-register index. - /// Used to give some type checking when modeling Reg:SubReg1, SubReg2. - struct RegSubRegPairAndIdx : RegSubRegPair { - unsigned SubIdx; - - RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0, - unsigned SubIdx = 0) - : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {} - }; - - /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI - /// and \p DefIdx. - /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of - /// the list is modeled as . - /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce - /// two elements: - /// - vreg1:sub1, sub0 - /// - vreg2<:0>, sub1 - /// - /// \returns true if it is possible to build such an input sequence - /// with the pair \p MI, \p DefIdx. False otherwise. - /// - /// \pre MI.isRegSequence() or MI.isRegSequenceLike(). - /// - /// \note The generic implementation does not provide any support for - /// MI.isRegSequenceLike(). In other words, one has to override - /// getRegSequenceLikeInputs for target specific instructions. - bool - getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, - SmallVectorImpl &InputRegs) const; - - /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI - /// and \p DefIdx. - /// \p [out] InputReg of the equivalent EXTRACT_SUBREG. - /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce: - /// - vreg1:sub1, sub0 - /// - /// \returns true if it is possible to build such an input sequence - /// with the pair \p MI, \p DefIdx. False otherwise. - /// - /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike(). - /// - /// \note The generic implementation does not provide any support for - /// MI.isExtractSubregLike(). In other words, one has to override - /// getExtractSubregLikeInputs for target specific instructions. - bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, - RegSubRegPairAndIdx &InputReg) const; - - /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI - /// and \p DefIdx. - /// \p [out] BaseReg and \p [out] InsertedReg contain - /// the equivalent inputs of INSERT_SUBREG. - /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce: - /// - BaseReg: vreg0:sub0 - /// - InsertedReg: vreg1:sub1, sub3 - /// - /// \returns true if it is possible to build such an input sequence - /// with the pair \p MI, \p DefIdx. False otherwise. - /// - /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike(). - /// - /// \note The generic implementation does not provide any support for - /// MI.isInsertSubregLike(). In other words, one has to override - /// getInsertSubregLikeInputs for target specific instructions. - bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, - RegSubRegPair &BaseReg, - RegSubRegPairAndIdx &InsertedReg) const; - - /// Return true if two machine instructions would produce identical values. - /// By default, this is only true when the two instructions - /// are deemed identical except for defs. If this function is called when the - /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for - /// aggressive checks. 
- virtual bool produceSameValue(const MachineInstr &MI0, - const MachineInstr &MI1, - const MachineRegisterInfo *MRI = nullptr) const; - - /// \returns true if a branch from an instruction with opcode \p BranchOpc - /// bytes is capable of jumping to a position \p BrOffset bytes away. - virtual bool isBranchOffsetInRange(unsigned BranchOpc, - int64_t BrOffset) const { - llvm_unreachable("target did not implement"); - } - - /// \returns The block that branch instruction \p MI jumps to. - virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const { - llvm_unreachable("target did not implement"); - } - - /// Insert an unconditional indirect branch at the end of \p MBB to \p - /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to - /// the offset of the position to insert the new branch. - /// - /// \returns The number of bytes added to the block. - virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB, - MachineBasicBlock &NewDestBB, - const DebugLoc &DL, - int64_t BrOffset = 0, - RegScavenger *RS = nullptr) const { - llvm_unreachable("target did not implement"); - } - - /// Analyze the branching code at the end of MBB, returning - /// true if it cannot be understood (e.g. it's a switch dispatch or isn't - /// implemented for a target). Upon success, this returns false and returns - /// with the following information in various cases: - /// - /// 1. If this block ends with no branches (it just falls through to its succ) - /// just return false, leaving TBB/FBB null. - /// 2. If this block ends with only an unconditional branch, it sets TBB to be - /// the destination block. - /// 3. If this block ends with a conditional branch and it falls through to a - /// successor block, it sets TBB to be the branch destination block and a - /// list of operands that evaluate the condition. These operands can be - /// passed to other TargetInstrInfo methods to create new branches. - /// 4. If this block ends with a conditional branch followed by an - /// unconditional branch, it returns the 'true' destination in TBB, the - /// 'false' destination in FBB, and a list of operands that evaluate the - /// condition. These operands can be passed to other TargetInstrInfo - /// methods to create new branches. - /// - /// Note that removeBranch and insertBranch must be implemented to support - /// cases where this method returns success. - /// - /// If AllowModify is true, then this routine is allowed to modify the basic - /// block (e.g. delete instructions after the unconditional branch). - /// - /// The CFG information in MBB.Predecessors and MBB.Successors must be valid - /// before calling this function. - virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, - MachineBasicBlock *&FBB, - SmallVectorImpl &Cond, - bool AllowModify = false) const { - return true; - } - - /// Represents a predicate at the MachineFunction level. 
The control flow a - /// MachineBranchPredicate represents is: - /// - /// Reg = LHS `Predicate` RHS == ConditionDef - /// if Reg then goto TrueDest else goto FalseDest - /// - struct MachineBranchPredicate { - enum ComparePredicate { - PRED_EQ, // True if two values are equal - PRED_NE, // True if two values are not equal - PRED_INVALID // Sentinel value - }; - - ComparePredicate Predicate = PRED_INVALID; - MachineOperand LHS = MachineOperand::CreateImm(0); - MachineOperand RHS = MachineOperand::CreateImm(0); - MachineBasicBlock *TrueDest = nullptr; - MachineBasicBlock *FalseDest = nullptr; - MachineInstr *ConditionDef = nullptr; - - /// SingleUseCondition is true if ConditionDef is dead except for the - /// branch(es) at the end of the basic block. - /// - bool SingleUseCondition = false; - - explicit MachineBranchPredicate() = default; - }; - - /// Analyze the branching code at the end of MBB and parse it into the - /// MachineBranchPredicate structure if possible. Returns false on success - /// and true on failure. - /// - /// If AllowModify is true, then this routine is allowed to modify the basic - /// block (e.g. delete instructions after the unconditional branch). - /// - virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, - MachineBranchPredicate &MBP, - bool AllowModify = false) const { - return true; - } - - /// Remove the branching code at the end of the specific MBB. - /// This is only invoked in cases where AnalyzeBranch returns success. It - /// returns the number of instructions that were removed. - /// If \p BytesRemoved is non-null, report the change in code size from the - /// removed instructions. - virtual unsigned removeBranch(MachineBasicBlock &MBB, - int *BytesRemoved = nullptr) const { - llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!"); - } - - /// Insert branch code into the end of the specified MachineBasicBlock. The - /// operands to this method are the same as those returned by AnalyzeBranch. - /// This is only invoked in cases where AnalyzeBranch returns success. It - /// returns the number of instructions inserted. If \p BytesAdded is non-null, - /// report the change in code size from the added instructions. - /// - /// It is also invoked by tail merging to add unconditional branches in - /// cases where AnalyzeBranch doesn't apply because there was no original - /// branch to analyze. At least this much must be implemented, else tail - /// merging needs to be disabled. - /// - /// The CFG information in MBB.Predecessors and MBB.Successors must be valid - /// before calling this function. - virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, - MachineBasicBlock *FBB, - ArrayRef Cond, - const DebugLoc &DL, - int *BytesAdded = nullptr) const { - llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!"); - } - - unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, - MachineBasicBlock *DestBB, - const DebugLoc &DL, - int *BytesAdded = nullptr) const { - return insertBranch(MBB, DestBB, nullptr, ArrayRef(), DL, - BytesAdded); - } - - /// Analyze the loop code, return true if it cannot be understoo. Upon - /// success, this function returns false and returns information about the - /// induction variable and compare instruction used at the end. - virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, - MachineInstr *&CmpInst) const { - return true; - } - - /// Generate code to reduce the loop iteration by one and check if the loop is - /// finished. 
Return the value/register of the new loop count. We need
- /// this function when peeling off one or more iterations of a loop. This
- /// function assumes the nth iteration is peeled first.
- virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,
- MachineInstr &Cmp,
- SmallVectorImpl<MachineOperand> &Cond,
- SmallVectorImpl<MachineInstr *> &PrevInsts,
- unsigned Iter, unsigned MaxIter) const {
- llvm_unreachable("Target didn't implement ReduceLoopCount");
- }
-
- /// Delete the instruction OldInst and everything after it, replacing it with
- /// an unconditional branch to NewDest. This is used by the tail merging pass.
- virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
- MachineBasicBlock *NewDest) const;
-
- /// Return true if it's legal to split the given basic
- /// block at the specified instruction (i.e. instruction would be the start
- /// of a new basic block).
- virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) const {
- return true;
- }
-
- /// Return true if it's profitable to predicate
- /// instructions with accumulated instruction latency of "NumCycles"
- /// of the specified basic block, where the probability of the instructions
- /// being executed is given by Probability, and Confidence is a measure
- /// of our confidence that it will be properly predicted.
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
- unsigned ExtraPredCycles,
- BranchProbability Probability) const {
- return false;
- }
-
- /// Second variant of isProfitableToIfCvt. This one
- /// checks for the case where two basic blocks from true and false path
- /// of an if-then-else (diamond) are predicated on mutually exclusive
- /// predicates, where the probability of the true path being taken is given
- /// by Probability, and Confidence is a measure of our confidence that it
- /// will be properly predicted.
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
- unsigned ExtraTCycles,
- MachineBasicBlock &FMBB, unsigned NumFCycles,
- unsigned ExtraFCycles,
- BranchProbability Probability) const {
- return false;
- }
-
- /// Return true if it's profitable for if-converter to duplicate instructions
- /// of specified accumulated instruction latencies in the specified MBB to
- /// enable if-conversion.
- /// The probability of the instructions being executed is given by
- /// Probability, and Confidence is a measure of our confidence that it
- /// will be properly predicted.
- virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
- unsigned NumCycles,
- BranchProbability Probability) const {
- return false;
- }
-
- /// Return true if it's profitable to unpredicate
- /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
- /// exclusive predicates.
- /// e.g.
- /// subeq r0, r1, #1
- /// addne r0, r1, #1
- /// =>
- /// sub r0, r1, #1
- /// addne r0, r1, #1
- ///
- /// This may be profitable if conditional instructions are always executed.
- virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
- MachineBasicBlock &FMBB) const {
- return false;
- }
-
- /// Return true if it is possible to insert a select
- /// instruction that chooses between TrueReg and FalseReg based on the
- /// condition code in Cond.
- ///
- /// When successful, also return the latency in cycles from TrueReg,
- /// FalseReg, and Cond to the destination register.
In most cases, a select - /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1 - /// - /// Some x86 implementations have 2-cycle cmov instructions. - /// - /// @param MBB Block where select instruction would be inserted. - /// @param Cond Condition returned by AnalyzeBranch. - /// @param TrueReg Virtual register to select when Cond is true. - /// @param FalseReg Virtual register to select when Cond is false. - /// @param CondCycles Latency from Cond+Branch to select output. - /// @param TrueCycles Latency from TrueReg to select output. - /// @param FalseCycles Latency from FalseReg to select output. - virtual bool canInsertSelect(const MachineBasicBlock &MBB, - ArrayRef Cond, unsigned TrueReg, - unsigned FalseReg, int &CondCycles, - int &TrueCycles, int &FalseCycles) const { - return false; - } - - /// Insert a select instruction into MBB before I that will copy TrueReg to - /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false. - /// - /// This function can only be called after canInsertSelect() returned true. - /// The condition in Cond comes from AnalyzeBranch, and it can be assumed - /// that the same flags or registers required by Cond are available at the - /// insertion point. - /// - /// @param MBB Block where select instruction should be inserted. - /// @param I Insertion point. - /// @param DL Source location for debugging. - /// @param DstReg Virtual register to be defined by select instruction. - /// @param Cond Condition as computed by AnalyzeBranch. - /// @param TrueReg Virtual register to copy when Cond is true. - /// @param FalseReg Virtual register to copy when Cons is false. - virtual void insertSelect(MachineBasicBlock &MBB, - MachineBasicBlock::iterator I, const DebugLoc &DL, - unsigned DstReg, ArrayRef Cond, - unsigned TrueReg, unsigned FalseReg) const { - llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!"); - } - - /// Analyze the given select instruction, returning true if - /// it cannot be understood. It is assumed that MI->isSelect() is true. - /// - /// When successful, return the controlling condition and the operands that - /// determine the true and false result values. - /// - /// Result = SELECT Cond, TrueOp, FalseOp - /// - /// Some targets can optimize select instructions, for example by predicating - /// the instruction defining one of the operands. Such targets should set - /// Optimizable. - /// - /// @param MI Select instruction to analyze. - /// @param Cond Condition controlling the select. - /// @param TrueOp Operand number of the value selected when Cond is true. - /// @param FalseOp Operand number of the value selected when Cond is false. - /// @param Optimizable Returned as true if MI is optimizable. - /// @returns False on success. - virtual bool analyzeSelect(const MachineInstr &MI, - SmallVectorImpl &Cond, - unsigned &TrueOp, unsigned &FalseOp, - bool &Optimizable) const { - assert(MI.getDesc().isSelect() && "MI must be a select instruction"); - return true; - } - - /// Given a select instruction that was understood by - /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by - /// merging it with one of its operands. Returns NULL on failure. - /// - /// When successful, returns the new select instruction. The client is - /// responsible for deleting MI. - /// - /// If both sides of the select can be optimized, PreferFalse is used to pick - /// a side. - /// - /// @param MI Optimizable select instruction. 
- /// @param NewMIs Set that record all MIs in the basic block up to \p - /// MI. Has to be updated with any newly created MI or deleted ones. - /// @param PreferFalse Try to optimize FalseOp instead of TrueOp. - /// @returns Optimized instruction or NULL. - virtual MachineInstr *optimizeSelect(MachineInstr &MI, - SmallPtrSetImpl &NewMIs, - bool PreferFalse = false) const { - // This function must be implemented if Optimizable is ever set. - llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!"); - } - - /// Emit instructions to copy a pair of physical registers. - /// - /// This function should support copies within any legal register class as - /// well as any cross-class copies created during instruction selection. - /// - /// The source and destination registers may overlap, which may require a - /// careful implementation when multiple copy instructions are required for - /// large registers. See for example the ARM target. - virtual void copyPhysReg(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, const DebugLoc &DL, - unsigned DestReg, unsigned SrcReg, - bool KillSrc) const { - llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); - } - - /// Store the specified register of the given register class to the specified - /// stack frame index. The store instruction is to be added to the given - /// machine basic block before the specified machine instruction. If isKill - /// is true, the register operand is the last use and must be marked kill. - virtual void storeRegToStackSlot(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, - unsigned SrcReg, bool isKill, int FrameIndex, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { - llvm_unreachable("Target didn't implement " - "TargetInstrInfo::storeRegToStackSlot!"); - } - - /// Load the specified register of the given register class from the specified - /// stack frame index. The load instruction is to be added to the given - /// machine basic block before the specified machine instruction. - virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, - unsigned DestReg, int FrameIndex, - const TargetRegisterClass *RC, - const TargetRegisterInfo *TRI) const { - llvm_unreachable("Target didn't implement " - "TargetInstrInfo::loadRegFromStackSlot!"); - } - - /// This function is called for all pseudo instructions - /// that remain after register allocation. Many pseudo instructions are - /// created to help register allocation. This is the place to convert them - /// into real instructions. The target can edit MI in place, or it can insert - /// new instructions and erase MI. The function should return true if - /// anything was changed. - virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; } - - /// Check whether the target can fold a load that feeds a subreg operand - /// (or a subreg operand that feeds a store). - /// For example, X86 may want to return true if it can fold - /// movl (%esp), %eax - /// subb, %al, ... - /// Into: - /// subb (%esp), ... - /// - /// Ideally, we'd like the target implementation of foldMemoryOperand() to - /// reject subregs - but since this behavior used to be enforced in the - /// target-independent code, moving this responsibility to the targets - /// has the potential of causing nasty silent breakage in out-of-tree targets. 
- virtual bool isSubregFoldable() const { return false; } - - /// Attempt to fold a load or store of the specified stack - /// slot into the specified machine instruction for the specified operand(s). - /// If this is possible, a new instruction is returned with the specified - /// operand folded, otherwise NULL is returned. - /// The new instruction is inserted before MI, and the client is responsible - /// for removing the old instruction. - MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef Ops, - int FrameIndex, - LiveIntervals *LIS = nullptr) const; - - /// Same as the previous version except it allows folding of any load and - /// store from / to any address, not just from a specific stack slot. - MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef Ops, - MachineInstr &LoadMI, - LiveIntervals *LIS = nullptr) const; - - /// Return true when there is potentially a faster code sequence - /// for an instruction chain ending in \p Root. All potential patterns are - /// returned in the \p Pattern vector. Pattern should be sorted in priority - /// order since the pattern evaluator stops checking as soon as it finds a - /// faster sequence. - /// \param Root - Instruction that could be combined with one of its operands - /// \param Patterns - Vector of possible combination patterns - virtual bool getMachineCombinerPatterns( - MachineInstr &Root, - SmallVectorImpl &Patterns) const; - - /// Return true when a code sequence can improve throughput. It - /// should be called only for instructions in loops. - /// \param Pattern - combiner pattern - virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const; - - /// Return true if the input \P Inst is part of a chain of dependent ops - /// that are suitable for reassociation, otherwise return false. - /// If the instruction's operands must be commuted to have a previous - /// instruction of the same type define the first source operand, \P Commuted - /// will be set to true. - bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const; - - /// Return true when \P Inst is both associative and commutative. - virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const { - return false; - } - - /// Return true when \P Inst has reassociable operands in the same \P MBB. - virtual bool hasReassociableOperands(const MachineInstr &Inst, - const MachineBasicBlock *MBB) const; - - /// Return true when \P Inst has reassociable sibling. - bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const; - - /// When getMachineCombinerPatterns() finds patterns, this function generates - /// the instructions that could replace the original code sequence. The client - /// has to decide whether the actual replacement is beneficial or not. - /// \param Root - Instruction that could be combined with one of its operands - /// \param Pattern - Combination pattern for Root - /// \param InsInstrs - Vector of new instructions that implement P - /// \param DelInstrs - Old instructions, including Root, that could be - /// replaced by InsInstr - /// \param InstrIdxForVirtReg - map of virtual register to instruction in - /// InsInstr that defines it - virtual void genAlternativeCodeSequence( - MachineInstr &Root, MachineCombinerPattern Pattern, - SmallVectorImpl &InsInstrs, - SmallVectorImpl &DelInstrs, - DenseMap &InstrIdxForVirtReg) const; - - /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to - /// reduce critical path length. 
- void reassociateOps(MachineInstr &Root, MachineInstr &Prev, - MachineCombinerPattern Pattern, - SmallVectorImpl &InsInstrs, - SmallVectorImpl &DelInstrs, - DenseMap &InstrIdxForVirtReg) const; - - /// This is an architecture-specific helper function of reassociateOps. - /// Set special operand attributes for new instructions after reassociation. - virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, - MachineInstr &NewMI1, - MachineInstr &NewMI2) const {} - - /// Return true when a target supports MachineCombiner. - virtual bool useMachineCombiner() const { return false; } - -protected: - /// Target-dependent implementation for foldMemoryOperand. - /// Target-independent code in foldMemoryOperand will - /// take care of adding a MachineMemOperand to the newly created instruction. - /// The instruction and any auxiliary instructions necessary will be inserted - /// at InsertPt. - virtual MachineInstr * - foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, - ArrayRef Ops, - MachineBasicBlock::iterator InsertPt, int FrameIndex, - LiveIntervals *LIS = nullptr) const { - return nullptr; - } - - /// Target-dependent implementation for foldMemoryOperand. - /// Target-independent code in foldMemoryOperand will - /// take care of adding a MachineMemOperand to the newly created instruction. - /// The instruction and any auxiliary instructions necessary will be inserted - /// at InsertPt. - virtual MachineInstr *foldMemoryOperandImpl( - MachineFunction &MF, MachineInstr &MI, ArrayRef Ops, - MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, - LiveIntervals *LIS = nullptr) const { - return nullptr; - } - - /// \brief Target-dependent implementation of getRegSequenceInputs. - /// - /// \returns true if it is possible to build the equivalent - /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise. - /// - /// \pre MI.isRegSequenceLike(). - /// - /// \see TargetInstrInfo::getRegSequenceInputs. - virtual bool getRegSequenceLikeInputs( - const MachineInstr &MI, unsigned DefIdx, - SmallVectorImpl &InputRegs) const { - return false; - } - - /// \brief Target-dependent implementation of getExtractSubregInputs. - /// - /// \returns true if it is possible to build the equivalent - /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. - /// - /// \pre MI.isExtractSubregLike(). - /// - /// \see TargetInstrInfo::getExtractSubregInputs. - virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, - unsigned DefIdx, - RegSubRegPairAndIdx &InputReg) const { - return false; - } - - /// \brief Target-dependent implementation of getInsertSubregInputs. - /// - /// \returns true if it is possible to build the equivalent - /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. - /// - /// \pre MI.isInsertSubregLike(). - /// - /// \see TargetInstrInfo::getInsertSubregInputs. - virtual bool - getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, - RegSubRegPair &BaseReg, - RegSubRegPairAndIdx &InsertedReg) const { - return false; - } - -public: - /// getAddressSpaceForPseudoSourceKind - Given the kind of memory - /// (e.g. stack) the target returns the corresponding address space. - virtual unsigned - getAddressSpaceForPseudoSourceKind(PseudoSourceValue::PSVKind Kind) const { - return 0; - } - - /// unfoldMemoryOperand - Separate a single instruction which folded a load or - /// a store or a load and a store into two or more instruction. 
If this is
- /// possible, returns true as well as the new instructions by reference.
- virtual bool
- unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
- bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr *> &NewMIs) const {
- return false;
- }
-
- virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode *> &NewNodes) const {
- return false;
- }
-
- /// Returns the opcode of the would be new
- /// instruction after load / store are unfolded from an instruction of the
- /// specified opcode. It returns zero if the specified unfolding is not
- /// possible. If LoadRegIndex is non-null, it is filled in with the operand
- /// index of the operand which will hold the register holding the loaded
- /// value.
- virtual unsigned
- getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex = nullptr) const {
- return 0;
- }
-
- /// This is used by the pre-regalloc scheduler to determine if two loads are
- /// loading from the same base address. It should only return true if the base
- /// pointers are the same and the only differences between the two addresses
- /// are the offset. It also returns the offsets by reference.
- virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
- int64_t &Offset1,
- int64_t &Offset2) const {
- return false;
- }
-
- /// This is used by the pre-regalloc scheduler to determine (in conjunction
- /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
- /// On some targets if two loads are loading from
- /// addresses in the same cache line, it's better if they are scheduled
- /// together. This function takes two integers that represent the load offsets
- /// from the common base address. It returns true if it decides it's desirable
- /// to schedule the two loads together. "NumLoads" is the number of loads that
- /// have already been scheduled after Load1.
- virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
- int64_t Offset1, int64_t Offset2,
- unsigned NumLoads) const {
- return false;
- }
-
- /// Get the base register and byte offset of an instruction that reads/writes
- /// memory.
- virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
- int64_t &Offset,
- const TargetRegisterInfo *TRI) const {
- return false;
- }
-
- /// Return true if the instruction contains a base register and offset. If
- /// true, the function also sets the operand position in the instruction
- /// for the base register and offset.
- virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
- unsigned &BasePos,
- unsigned &OffsetPos) const {
- return false;
- }
-
- /// If the instruction is an increment of a constant value, return the amount.
- virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
- return false;
- }
-
- /// Returns true if the two given memory operations should be scheduled
- /// adjacent. Note that you have to add:
- /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
- /// or
- /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
- /// to TargetPassConfig::createMachineScheduler() to have an effect.
- virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1, - MachineInstr &SecondLdSt, unsigned BaseReg2, - unsigned NumLoads) const { - llvm_unreachable("target did not implement shouldClusterMemOps()"); - } - - /// Reverses the branch condition of the specified condition list, - /// returning false on success and true if it cannot be reversed. - virtual bool - reverseBranchCondition(SmallVectorImpl &Cond) const { - return true; - } - - /// Insert a noop into the instruction stream at the specified point. - virtual void insertNoop(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI) const; - - /// Return the noop instruction to use for a noop. - virtual void getNoop(MCInst &NopInst) const; - - /// Return true for post-incremented instructions. - virtual bool isPostIncrement(const MachineInstr &MI) const { return false; } - - /// Returns true if the instruction is already predicated. - virtual bool isPredicated(const MachineInstr &MI) const { return false; } - - /// Returns true if the instruction is a - /// terminator instruction that has not been predicated. - virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const; - - /// Returns true if MI is an unconditional tail call. - virtual bool isUnconditionalTailCall(const MachineInstr &MI) const { - return false; - } - - /// Returns true if the tail call can be made conditional on BranchCond. - virtual bool canMakeTailCallConditional(SmallVectorImpl &Cond, - const MachineInstr &TailCall) const { - return false; - } - - /// Replace the conditional branch in MBB with a conditional tail call. - virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, - SmallVectorImpl &Cond, - const MachineInstr &TailCall) const { - llvm_unreachable("Target didn't implement replaceBranchWithTailCall!"); - } - - /// Convert the instruction into a predicated instruction. - /// It returns true if the operation was successful. - virtual bool PredicateInstruction(MachineInstr &MI, - ArrayRef Pred) const; - - /// Returns true if the first specified predicate - /// subsumes the second, e.g. GE subsumes GT. - virtual bool SubsumesPredicate(ArrayRef Pred1, - ArrayRef Pred2) const { - return false; - } - - /// If the specified instruction defines any predicate - /// or condition code register(s) used for predication, returns true as well - /// as the definition predicate(s) by reference. - virtual bool DefinesPredicate(MachineInstr &MI, - std::vector &Pred) const { - return false; - } - - /// Return true if the specified instruction can be predicated. - /// By default, this returns true for every instruction with a - /// PredicateOperand. - virtual bool isPredicable(const MachineInstr &MI) const { - return MI.getDesc().isPredicable(); - } - - /// Return true if it's safe to move a machine - /// instruction that defines the specified register class. - virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { - return true; - } - - /// Test if the given instruction should be considered a scheduling boundary. - /// This primarily includes labels and terminators. - virtual bool isSchedulingBoundary(const MachineInstr &MI, - const MachineBasicBlock *MBB, - const MachineFunction &MF) const; - - /// Measure the specified inline asm to determine an approximation of its - /// length. - virtual unsigned getInlineAsmLength(const char *Str, - const MCAsmInfo &MAI) const; - - /// Allocate and return a hazard recognizer to use for this target when - /// scheduling the machine instructions before register allocation. 
- virtual ScheduleHazardRecognizer * - CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, - const ScheduleDAG *DAG) const; - - /// Allocate and return a hazard recognizer to use for this target when - /// scheduling the machine instructions before register allocation. - virtual ScheduleHazardRecognizer * - CreateTargetMIHazardRecognizer(const InstrItineraryData *, - const ScheduleDAG *DAG) const; - - /// Allocate and return a hazard recognizer to use for this target when - /// scheduling the machine instructions after register allocation. - virtual ScheduleHazardRecognizer * - CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, - const ScheduleDAG *DAG) const; - - /// Allocate and return a hazard recognizer to use for by non-scheduling - /// passes. - virtual ScheduleHazardRecognizer * - CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { - return nullptr; - } - - /// Provide a global flag for disabling the PreRA hazard recognizer that - /// targets may choose to honor. - bool usePreRAHazardRecognizer() const; - - /// For a comparison instruction, return the source registers - /// in SrcReg and SrcReg2 if having two register operands, and the value it - /// compares against in CmpValue. Return true if the comparison instruction - /// can be analyzed. - virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, - unsigned &SrcReg2, int &Mask, int &Value) const { - return false; - } - - /// See if the comparison instruction can be converted - /// into something more efficient. E.g., on ARM most instructions can set the - /// flags register, obviating the need for a separate CMP. - virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, - unsigned SrcReg2, int Mask, int Value, - const MachineRegisterInfo *MRI) const { - return false; - } - virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; } - - /// Try to remove the load by folding it to a register operand at the use. - /// We fold the load instructions if and only if the - /// def and use are in the same BB. We only look at one load and see - /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register - /// defined by the load we are trying to fold. DefMI returns the machine - /// instruction that defines FoldAsLoadDefReg, and the function returns - /// the machine instruction generated due to folding. - virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI, - const MachineRegisterInfo *MRI, - unsigned &FoldAsLoadDefReg, - MachineInstr *&DefMI) const { - return nullptr; - } - - /// 'Reg' is known to be defined by a move immediate instruction, - /// try to fold the immediate into the use instruction. - /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true, - /// then the caller may assume that DefMI has been erased from its parent - /// block. The caller may assume that it will not be erased by this - /// function otherwise. - virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, - unsigned Reg, MachineRegisterInfo *MRI) const { - return false; - } - - /// Return the number of u-operations the given machine - /// instruction will be decoded to on the target cpu. The itinerary's - /// IssueWidth is the number of microops that can be dispatched each - /// cycle. An instruction with zero microops takes no dispatch resources. 
- virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, - const MachineInstr &MI) const; - - /// Return true for pseudo instructions that don't consume any - /// machine resources in their current form. These are common cases that the - /// scheduler should consider free, rather than conservatively handling them - /// as instructions with no itinerary. - bool isZeroCost(unsigned Opcode) const { - return Opcode <= TargetOpcode::COPY; - } - - virtual int getOperandLatency(const InstrItineraryData *ItinData, - SDNode *DefNode, unsigned DefIdx, - SDNode *UseNode, unsigned UseIdx) const; - - /// Compute and return the use operand latency of a given pair of def and use. - /// In most cases, the static scheduling itinerary was enough to determine the - /// operand latency. But it may not be possible for instructions with variable - /// number of defs / uses. - /// - /// This is a raw interface to the itinerary that may be directly overridden - /// by a target. Use computeOperandLatency to get the best estimate of - /// latency. - virtual int getOperandLatency(const InstrItineraryData *ItinData, - const MachineInstr &DefMI, unsigned DefIdx, - const MachineInstr &UseMI, - unsigned UseIdx) const; - - /// Compute the instruction latency of a given instruction. - /// If the instruction has higher cost when predicated, it's returned via - /// PredCost. - virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, - const MachineInstr &MI, - unsigned *PredCost = nullptr) const; - - virtual unsigned getPredicationCost(const MachineInstr &MI) const; - - virtual int getInstrLatency(const InstrItineraryData *ItinData, - SDNode *Node) const; - - /// Return the default expected latency for a def based on its opcode. - unsigned defaultDefLatency(const MCSchedModel &SchedModel, - const MachineInstr &DefMI) const; - - int computeDefOperandLatency(const InstrItineraryData *ItinData, - const MachineInstr &DefMI) const; - - /// Return true if this opcode has high latency to its result. - virtual bool isHighLatencyDef(int opc) const { return false; } - - /// Compute operand latency between a def of 'Reg' - /// and a use in the current loop. Return true if the target considered - /// it 'high'. This is used by optimization passes such as machine LICM to - /// determine whether it makes sense to hoist an instruction out even in a - /// high register pressure situation. - virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, - const MachineRegisterInfo *MRI, - const MachineInstr &DefMI, unsigned DefIdx, - const MachineInstr &UseMI, - unsigned UseIdx) const { - return false; - } - - /// Compute operand latency of a def of 'Reg'. Return true - /// if the target considered it 'low'. - virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, - const MachineInstr &DefMI, - unsigned DefIdx) const; - - /// Perform target-specific instruction verification. - virtual bool verifyInstruction(const MachineInstr &MI, - StringRef &ErrInfo) const { - return true; - } - - /// Return the current execution domain and bit mask of - /// possible domains for instruction. - /// - /// Some micro-architectures have multiple execution domains, and multiple - /// opcodes that perform the same operation in different domains. For - /// example, the x86 architecture provides the por, orps, and orpd - /// instructions that all do the same thing. There is a latency penalty if a - /// register is written in one domain and read in another. 
- /// - /// This function returns a pair (domain, mask) containing the execution - /// domain of MI, and a bit mask of possible domains. The setExecutionDomain - /// function can be used to change the opcode to one of the domains in the - /// bit mask. Instructions whose execution domain can't be changed should - /// return a 0 mask. - /// - /// The execution domain numbers don't have any special meaning except domain - /// 0 is used for instructions that are not associated with any interesting - /// execution domain. - /// - virtual std::pair - getExecutionDomain(const MachineInstr &MI) const { - return std::make_pair(0, 0); - } - - /// Change the opcode of MI to execute in Domain. - /// - /// The bit (1 << Domain) must be set in the mask returned from - /// getExecutionDomain(MI). - virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {} - - /// Returns the preferred minimum clearance - /// before an instruction with an unwanted partial register update. - /// - /// Some instructions only write part of a register, and implicitly need to - /// read the other parts of the register. This may cause unwanted stalls - /// preventing otherwise unrelated instructions from executing in parallel in - /// an out-of-order CPU. - /// - /// For example, the x86 instruction cvtsi2ss writes its result to bits - /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so - /// the instruction needs to wait for the old value of the register to become - /// available: - /// - /// addps %xmm1, %xmm0 - /// movaps %xmm0, (%rax) - /// cvtsi2ss %rbx, %xmm0 - /// - /// In the code above, the cvtsi2ss instruction needs to wait for the addps - /// instruction before it can issue, even though the high bits of %xmm0 - /// probably aren't needed. - /// - /// This hook returns the preferred clearance before MI, measured in - /// instructions. Other defs of MI's operand OpNum are avoided in the last N - /// instructions before MI. It should only return a positive value for - /// unwanted dependencies. If the old bits of the defined register have - /// useful values, or if MI is determined to otherwise read the dependency, - /// the hook should return 0. - /// - /// The unwanted dependency may be handled by: - /// - /// 1. Allocating the same register for an MI def and use. That makes the - /// unwanted dependency identical to a required dependency. - /// - /// 2. Allocating a register for the def that has no defs in the previous N - /// instructions. - /// - /// 3. Calling breakPartialRegDependency() with the same arguments. This - /// allows the target to insert a dependency breaking instruction. - /// - virtual unsigned - getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, - const TargetRegisterInfo *TRI) const { - // The default implementation returns 0 for no partial register dependency. - return 0; - } - - /// \brief Return the minimum clearance before an instruction that reads an - /// unused register. - /// - /// For example, AVX instructions may copy part of a register operand into - /// the unused high bits of the destination register. - /// - /// vcvtsi2sdq %rax, %xmm0, %xmm14 - /// - /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a - /// false dependence on any previous write to %xmm0. - /// - /// This hook works similarly to getPartialRegUpdateClearance, except that it - /// does not take an operand index. Instead sets \p OpNum to the index of the - /// unused register. 
- virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum, - const TargetRegisterInfo *TRI) const { - // The default implementation returns 0 for no undef register dependency. - return 0; - } - - /// Insert a dependency-breaking instruction - /// before MI to eliminate an unwanted dependency on OpNum. - /// - /// If it wasn't possible to avoid a def in the last N instructions before MI - /// (see getPartialRegUpdateClearance), this hook will be called to break the - /// unwanted dependency. - /// - /// On x86, an xorps instruction can be used as a dependency breaker: - /// - /// addps %xmm1, %xmm0 - /// movaps %xmm0, (%rax) - /// xorps %xmm0, %xmm0 - /// cvtsi2ss %rbx, %xmm0 - /// - /// An operand should be added to MI if an instruction was - /// inserted. This ties the instructions together in the post-ra scheduler. - /// - virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, - const TargetRegisterInfo *TRI) const {} - - /// Create machine specific model for scheduling. - virtual DFAPacketizer * - CreateTargetScheduleState(const TargetSubtargetInfo &) const { - return nullptr; - } - - /// Sometimes, it is possible for the target - /// to tell, even without aliasing information, that two MIs access different - /// memory addresses. This function returns true if two MIs access different - /// memory addresses and false otherwise. - /// - /// Assumes any physical registers used to compute addresses have the same - /// value for both instructions. (This is the most useful assumption for - /// post-RA scheduling.) - /// - /// See also MachineInstr::mayAlias, which is implemented on top of this - /// function. - virtual bool - areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, - AliasAnalysis *AA = nullptr) const { - assert((MIa.mayLoad() || MIa.mayStore()) && - "MIa must load from or modify a memory location"); - assert((MIb.mayLoad() || MIb.mayStore()) && - "MIb must load from or modify a memory location"); - return false; - } - - /// \brief Return the value to use for the MachineCSE's LookAheadLimit, - /// which is a heuristic used for CSE'ing phys reg defs. - virtual unsigned getMachineCSELookAheadLimit() const { - // The default lookahead is small to prevent unprofitable quadratic - // behavior. - return 5; - } - - /// Return an array that contains the ids of the target indices (used for the - /// TargetIndex machine operand) and their names. - /// - /// MIR Serialization is able to serialize only the target indices that are - /// defined by this method. - virtual ArrayRef> - getSerializableTargetIndices() const { - return None; - } - - /// Decompose the machine operand's target flags into two values - the direct - /// target flag value and any of bit flags that are applied. - virtual std::pair - decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const { - return std::make_pair(0u, 0u); - } - - /// Return an array that contains the direct target flag values and their - /// names. - /// - /// MIR Serialization is able to serialize only the target flags that are - /// defined by this method. - virtual ArrayRef> - getSerializableDirectMachineOperandTargetFlags() const { - return None; - } - - /// Return an array that contains the bitmask target flag values and their - /// names. - /// - /// MIR Serialization is able to serialize only the target flags that are - /// defined by this method. 
- virtual ArrayRef> - getSerializableBitmaskMachineOperandTargetFlags() const { - return None; - } - - /// Return an array that contains the MMO target flag values and their - /// names. - /// - /// MIR Serialization is able to serialize only the MMO target flags that are - /// defined by this method. - virtual ArrayRef> - getSerializableMachineMemOperandTargetFlags() const { - return None; - } - - /// Determines whether \p Inst is a tail call instruction. Override this - /// method on targets that do not properly set MCID::Return and MCID::Call on - /// tail call instructions." - virtual bool isTailCall(const MachineInstr &Inst) const { - return Inst.isReturn() && Inst.isCall(); - } - - /// True if the instruction is bound to the top of its basic block and no - /// other instructions shall be inserted before it. This can be implemented - /// to prevent register allocator to insert spills before such instructions. - virtual bool isBasicBlockPrologue(const MachineInstr &MI) const { - return false; - } - - /// \brief Describes the number of instructions that it will take to call and - /// construct a frame for a given outlining candidate. - struct MachineOutlinerInfo { - /// Number of instructions to call an outlined function for this candidate. - unsigned CallOverhead; - - /// \brief Number of instructions to construct an outlined function frame - /// for this candidate. - unsigned FrameOverhead; - - /// \brief Represents the specific instructions that must be emitted to - /// construct a call to this candidate. - unsigned CallConstructionID; - - /// \brief Represents the specific instructions that must be emitted to - /// construct a frame for this candidate's outlined function. - unsigned FrameConstructionID; - - MachineOutlinerInfo() {} - MachineOutlinerInfo(unsigned CallOverhead, unsigned FrameOverhead, - unsigned CallConstructionID, - unsigned FrameConstructionID) - : CallOverhead(CallOverhead), FrameOverhead(FrameOverhead), - CallConstructionID(CallConstructionID), - FrameConstructionID(FrameConstructionID) {} - }; - - /// \brief Returns a \p MachineOutlinerInfo struct containing target-specific - /// information for a set of outlining candidates. - virtual MachineOutlinerInfo getOutlininingCandidateInfo( - std::vector< - std::pair> - &RepeatedSequenceLocs) const { - llvm_unreachable( - "Target didn't implement TargetInstrInfo::getOutliningOverhead!"); - } - - /// Represents how an instruction should be mapped by the outliner. - /// \p Legal instructions are those which are safe to outline. - /// \p Illegal instructions are those which cannot be outlined. - /// \p Invisible instructions are instructions which can be outlined, but - /// shouldn't actually impact the outlining result. - enum MachineOutlinerInstrType { Legal, Illegal, Invisible }; - - /// Returns how or if \p MI should be outlined. - virtual MachineOutlinerInstrType getOutliningType(MachineInstr &MI) const { - llvm_unreachable( - "Target didn't implement TargetInstrInfo::getOutliningType!"); - } - - /// Insert a custom epilogue for outlined functions. - /// This may be empty, in which case no epilogue or return statement will be - /// emitted. - virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB, - MachineFunction &MF, - const MachineOutlinerInfo &MInfo) const { - llvm_unreachable( - "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!"); - } - - /// Insert a call to an outlined function into the program. - /// Returns an iterator to the spot where we inserted the call. 
This must be - /// implemented by the target. - virtual MachineBasicBlock::iterator - insertOutlinedCall(Module &M, MachineBasicBlock &MBB, - MachineBasicBlock::iterator &It, MachineFunction &MF, - const MachineOutlinerInfo &MInfo) const { - llvm_unreachable( - "Target didn't implement TargetInstrInfo::insertOutlinedCall!"); - } - - /// Insert a custom prologue for outlined functions. - /// This may be empty, in which case no prologue will be emitted. - virtual void insertOutlinerPrologue(MachineBasicBlock &MBB, - MachineFunction &MF, - const MachineOutlinerInfo &MInfo) const { - llvm_unreachable( - "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!"); - } - - /// Return true if the function can safely be outlined from. - /// A function \p MF is considered safe for outlining if an outlined function - /// produced from instructions in F will produce a program which produces the - /// same output for any set of given inputs. - virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, - bool OutlineFromLinkOnceODRs) const { - llvm_unreachable("Target didn't implement " - "TargetInstrInfo::isFunctionSafeToOutlineFrom!"); - } - -private: - unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode; - unsigned CatchRetOpcode; - unsigned ReturnOpcode; -}; - -/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair. -template <> struct DenseMapInfo { - using RegInfo = DenseMapInfo; - - static inline TargetInstrInfo::RegSubRegPair getEmptyKey() { - return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(), - RegInfo::getEmptyKey()); - } - - static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() { - return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(), - RegInfo::getTombstoneKey()); - } - - /// \brief Reuse getHashValue implementation from - /// std::pair. - static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) { - std::pair PairVal = std::make_pair(Val.Reg, Val.SubReg); - return DenseMapInfo>::getHashValue(PairVal); - } - - static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, - const TargetInstrInfo::RegSubRegPair &RHS) { - return RegInfo::isEqual(LHS.Reg, RHS.Reg) && - RegInfo::isEqual(LHS.SubReg, RHS.SubReg); - } -}; - -} // end namespace llvm - -#endif // LLVM_TARGET_TARGETINSTRINFO_H -- cgit v1.2.1 From 0fa582d74a7402b21b232eaf63b5d1c698265360 Mon Sep 17 00:00:00 2001 From: Rafael Espindola Date: Wed, 8 Nov 2017 01:05:44 +0000 Subject: Convert FileOutputBuffer to Expected. NFC. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317649 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Support/FileOutputBuffer.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/llvm/Support/FileOutputBuffer.h b/include/llvm/Support/FileOutputBuffer.h index 8db64098c368..53693f1dac27 100644 --- a/include/llvm/Support/FileOutputBuffer.h +++ b/include/llvm/Support/FileOutputBuffer.h @@ -17,7 +17,7 @@ #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/DataTypes.h" -#include "llvm/Support/ErrorOr.h" +#include "llvm/Support/Error.h" #include "llvm/Support/FileSystem.h" namespace llvm { @@ -37,7 +37,7 @@ public: /// Factory method to create an OutputBuffer object which manages a read/write /// buffer of the specified size. When committed, the buffer will be written /// to the file at the specified path. - static ErrorOr> + static Expected> create(StringRef FilePath, size_t Size, unsigned Flags = 0); /// Returns a pointer to the start of the buffer. 
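As a rough caller-side sketch of what this change implies (not part of the patch; the helper name and arguments are illustrative), code consuming FileOutputBuffer::create() now receives an Expected<> and must take or propagate the Error rather than test an error_code:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/FileOutputBuffer.h"
  #include <algorithm>
  #include <cstdint>
  #include <memory>

  // Hypothetical helper, shown only to illustrate the new error plumbing.
  static llvm::Error writeSomething(llvm::StringRef Path,
                                    llvm::ArrayRef<uint8_t> Bytes) {
    llvm::Expected<std::unique_ptr<llvm::FileOutputBuffer>> BufOrErr =
        llvm::FileOutputBuffer::create(Path, Bytes.size());
    if (!BufOrErr)
      return BufOrErr.takeError(); // Propagate the Error instead of checking an error_code.
    std::unique_ptr<llvm::FileOutputBuffer> &Buf = *BufOrErr;
    std::copy(Bytes.begin(), Bytes.end(), Buf->getBufferStart());
    // At this point in the series commit() still returns std::error_code;
    // the next commit below converts it to Error as well.
    return llvm::errorCodeToError(Buf->commit());
  }
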
-- cgit v1.2.1 From 14c636043c9d703b0ccfcb96e80f938e47784e5f Mon Sep 17 00:00:00 2001 From: Rafael Espindola Date: Wed, 8 Nov 2017 01:50:29 +0000 Subject: Convert FileOutputBuffer::commit to Error. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317656 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Support/FileOutputBuffer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/llvm/Support/FileOutputBuffer.h b/include/llvm/Support/FileOutputBuffer.h index 53693f1dac27..6aed423a01e3 100644 --- a/include/llvm/Support/FileOutputBuffer.h +++ b/include/llvm/Support/FileOutputBuffer.h @@ -57,7 +57,7 @@ public: /// is called, the file is deleted in the destructor. The optional parameter /// is used if it turns out you want the file size to be smaller than /// initially requested. - virtual std::error_code commit() = 0; + virtual Error commit() = 0; /// If this object was previously committed, the destructor just deletes /// this object. If this object was not committed, the destructor -- cgit v1.2.1 From 19b50e8dffa341a1790bcb72f8cbf095bea4ee9f Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Wed, 8 Nov 2017 08:52:31 +0000 Subject: DAG: Add computeKnownBitsForFrameIndex Some of the AMDGPU stack addressing modes require knowing the sign bit is zero. We used to accomplish this by custom lowering frame indexes, and then putting an AssertZext around a TargetFrameIndex. This required specifically looking for the AssextZext + frame index pattern which was moderately disgusting. The same could probably be accomplished with a target specific node, but would still require special handling of frame indexes. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317671 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Target/TargetLowering.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index c1d0b32f7d75..994480ebc90e 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -2678,6 +2678,15 @@ public: const SelectionDAG &DAG, unsigned Depth = 0) const; + /// Determine which of the bits of FrameIndex \p FIOp are known to be 0. + /// Default implementation computes low bits based on alignment + /// information. This should preserve known bits passed into it. + virtual void computeKnownBitsForFrameIndex(const SDValue FIOp, + KnownBits &Known, + const APInt &DemandedElts, + const SelectionDAG &DAG, + unsigned Depth = 0) const; + /// This method can be implemented by targets that want to expose additional /// information about sign bits to the DAG Combiner. The DemandedElts /// argument allows us to only collect the minimum sign bits that are shared -- cgit v1.2.1 From 4d211caa3764ce1f205e8bc8cfa579735633fb59 Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Wed, 8 Nov 2017 09:26:06 +0000 Subject: [NFCI] Ensure TargetOpcode::* are compatible with guessInstructionProperties=0 rL162640 introduced CodeGenTarget::guessInstructionProperties. If a target sets guessInstructionProperties=0 in its FooInstrInfo, tablegen will error if it has to guess properties from patterns. Unfortunately, guessInstructionProperties=0 can't be used with current upstream LLVM as instructions in the TargetOpcode namespace are always included and sometimes have inferred properties for mayLoad, mayStore, and hasSideEffects. 
This patch provides the simplest possible fix to this problem, setting default values for these fields in the TargetOpcode scope. There is no intended functional change, as the explicitly set properties should match what was previously inferred. A number of the instructions had hasSideEffects=1 inferred unintentionally. This patch makes it explicit, while future patches (such as D37097) correct the property. Differential Revision: https://reviews.llvm.org/D37065 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317674 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Target/Target.td | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index 927da1c72d46..7dc2aec324ec 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -884,12 +884,16 @@ class InstrInfo { // Standard Pseudo Instructions. // This list must match TargetOpcodes.h and CodeGenTarget.cpp. // Only these instructions are allowed in the TargetOpcode namespace. -let isCodeGenOnly = 1, isPseudo = 1, hasNoSchedulingInfo = 1, - Namespace = "TargetOpcode" in { +// Ensure mayLoad and mayStore have a default value, so as not to break +// targets that set guessInstructionProperties=0. Any local definition of +// mayLoad/mayStore takes precedence over these default values. +let mayLoad = 0, mayStore = 0, isCodeGenOnly = 1, isPseudo = 1, + hasNoSchedulingInfo = 1, Namespace = "TargetOpcode" in { def PHI : Instruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins variable_ops); let AsmString = "PHINODE"; + let hasSideEffects = 1; } def INLINEASM : Instruction { let OutOperandList = (outs); @@ -902,6 +906,7 @@ def CFI_INSTRUCTION : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; + let hasSideEffects = 1; let isNotDuplicable = 0; } def EH_LABEL : Instruction { @@ -909,6 +914,7 @@ def EH_LABEL : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; + let hasSideEffects = 1; let isNotDuplicable = 1; } def GC_LABEL : Instruction { @@ -916,6 +922,7 @@ def GC_LABEL : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; + let hasSideEffects = 1; let isNotDuplicable = 1; } def ANNOTATION_LABEL : Instruction { @@ -923,6 +930,7 @@ def ANNOTATION_LABEL : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; + let hasSideEffects = 1; let isNotDuplicable = 1; } def KILL : Instruction { @@ -990,6 +998,7 @@ def BUNDLE : Instruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = "BUNDLE"; + let hasSideEffects = 1; } def LIFETIME_START : Instruction { let OutOperandList = (outs); @@ -1006,6 +1015,7 @@ def LIFETIME_END : Instruction { def STACKMAP : Instruction { let OutOperandList = (outs); let InOperandList = (ins i64imm:$id, i32imm:$nbytes, variable_ops); + let hasSideEffects = 1; let isCall = 1; let mayLoad = 1; let usesCustomInserter = 1; @@ -1014,6 +1024,7 @@ def PATCHPOINT : Instruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins i64imm:$id, i32imm:$nbytes, unknown:$callee, i32imm:$nargs, i32imm:$cc, variable_ops); + let hasSideEffects = 1; let isCall = 1; let mayLoad = 1; let usesCustomInserter = 1; @@ -1048,6 +1059,7 @@ def FAULTING_OP : Instruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins variable_ops); let usesCustomInserter = 
1; + let hasSideEffects = 1; let mayLoad = 1; let mayStore = 1; let isTerminator = 1; -- cgit v1.2.1 From cf89e1c2fe829eab7b7b62d2a88b9cbf30e1f5d6 Mon Sep 17 00:00:00 2001 From: Adrian McCarthy Date: Wed, 8 Nov 2017 18:57:02 +0000 Subject: NFC: Rename MCSafeSEHFragment to MCSymbolIdFragment Summary: This fragment emits a symbol ID and will be useful for more than just Safe SEH tables (e.g., I plan to re-use it for Control Flow Guard tables). This is simply a rename refactor. Reviewers: rnk Subscribers: llvm-commits, hiraditya Differential Revision: https://reviews.llvm.org/D39770 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317703 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/MC/MCFragment.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/llvm/MC/MCFragment.h b/include/llvm/MC/MCFragment.h index 7c66b2126cd5..3d73a0279037 100644 --- a/include/llvm/MC/MCFragment.h +++ b/include/llvm/MC/MCFragment.h @@ -42,7 +42,7 @@ public: FT_DwarfFrame, FT_LEB, FT_Padding, - FT_SafeSEH, + FT_SymbolId, FT_CVInlineLines, FT_CVDefRange, FT_Dummy @@ -562,12 +562,13 @@ public: } }; -class MCSafeSEHFragment : public MCFragment { +/// Represents a symbol table index fragment. +class MCSymbolIdFragment : public MCFragment { const MCSymbol *Sym; public: - MCSafeSEHFragment(const MCSymbol *Sym, MCSection *Sec = nullptr) - : MCFragment(FT_SafeSEH, false, 0, Sec), Sym(Sym) {} + MCSymbolIdFragment(const MCSymbol *Sym, MCSection *Sec = nullptr) + : MCFragment(FT_SymbolId, false, 0, Sec), Sym(Sym) {} /// \name Accessors /// @{ @@ -578,7 +579,7 @@ public: /// @} static bool classof(const MCFragment *F) { - return F->getKind() == MCFragment::FT_SafeSEH; + return F->getKind() == MCFragment::FT_SymbolId; } }; -- cgit v1.2.1
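For context, a minimal sketch of how the renamed fragment is typically used (the surrounding function and section are hypothetical; only MCSymbolIdFragment's constructor signature comes from the diff above). A streamer appends one such fragment per symbol whose symbol-table index should be written into a table, e.g. one entry per safe-SEH handler:

  #include "llvm/MC/MCFragment.h"
  #include "llvm/MC/MCSection.h"
  #include "llvm/MC/MCSymbol.h"

  // Illustrative helper: append a symbol-index entry to a table section.
  static void addSymbolIdEntry(llvm::MCSection &Table,
                               const llvm::MCSymbol &Sym) {
    // Passing the section to the constructor appends the fragment to that
    // section's fragment list, which then owns it; the object writer later
    // emits Sym's symbol-table index at this position.
    new llvm::MCSymbolIdFragment(&Sym, &Table);
  }
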