Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                       |  10
-rw-r--r--  lib/Transforms/IPO/LowerTypeTests.cpp                  |   2
-rw-r--r--  lib/Transforms/IPO/PartialInlining.cpp                 |  16
-rw-r--r--  lib/Transforms/IPO/PassManagerBuilder.cpp              |   9
-rw-r--r--  lib/Transforms/IPO/SampleProfile.cpp                   |  26
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp       |   8
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp        |   2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp     |   3
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp    |  11
-rw-r--r--  lib/Transforms/InstCombine/InstCombineShifts.cpp       | 109
-rw-r--r--  lib/Transforms/Instrumentation/PGOInstrumentation.cpp  |  28
-rw-r--r--  lib/Transforms/Scalar/ADCE.cpp                         |   4
-rw-r--r--  lib/Transforms/Scalar/CMakeLists.txt                   |   1
-rw-r--r--  lib/Transforms/Scalar/CallSiteSplitting.cpp            | 492
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp               |  10
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp                |   7
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                         | 196
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp           |   6
-rw-r--r--  lib/Transforms/Scalar/LoopPredication.cpp              | 234
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp           |   2
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp                  |  12
-rw-r--r--  lib/Transforms/Scalar/RewriteStatepointsForGC.cpp      |  42
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp                         |   6
-rw-r--r--  lib/Transforms/Scalar/Scalar.cpp                       |   1
-rw-r--r--  lib/Transforms/Utils/FunctionImportUtils.cpp           |  17
-rw-r--r--  lib/Transforms/Utils/Local.cpp                         |  37
-rw-r--r--  lib/Transforms/Utils/LoopUtils.cpp                     |  12
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp                   |   4
-rw-r--r--  lib/Transforms/Utils/SimplifyLibCalls.cpp              |  37
-rw-r--r--  lib/Transforms/Utils/SplitModule.cpp                   |  18
-rw-r--r--  lib/Transforms/Vectorize/LoopVectorize.cpp             |   6
-rw-r--r--  lib/Transforms/Vectorize/SLPVectorizer.cpp             |   4
32 files changed, 1112 insertions(+), 260 deletions(-)
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 12090bff381a..4bb2984e3b47 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -448,9 +448,13 @@ static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
for (auto *GVE : GVs) {
DIVariable *Var = GVE->getVariable();
DIExpression *Expr = GVE->getExpression();
- if (NumElements > 1)
- Expr = DIExpression::createFragmentExpression(Expr, FragmentOffsetInBits,
- FragmentSizeInBits);
+ if (NumElements > 1) {
+ if (auto E = DIExpression::createFragmentExpression(
+ Expr, FragmentOffsetInBits, FragmentSizeInBits))
+ Expr = *E;
+ else
+ return;
+ }
auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
NGV->addDebugInfo(NGVE);
}
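
The hunk above switches to the fallible form of DIExpression::createFragmentExpression and bails out of the debug-info transfer when no valid fragment can be built. A minimal standalone C++ sketch of the same guard pattern, using std::optional and hypothetical names (not this patch's API):

    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct Fragment { uint64_t OffsetInBits, SizeInBits; };

    // Mirrors the fallible factory: returns nothing when the requested
    // fragment does not fit inside the variable.
    std::optional<Fragment> createFragment(uint64_t Offset, uint64_t Size,
                                           uint64_t VarSizeInBits) {
      if (Offset + Size > VarSizeInBits)
        return std::nullopt;
      return Fragment{Offset, Size};
    }

    int main() {
      // As in the patch: use the result if present, otherwise give up
      // instead of attaching malformed debug info.
      if (auto F = createFragment(32, 32, 64))
        std::cout << "fragment at bit " << F->OffsetInBits << "\n";
      else
        std::cout << "no valid fragment; skip debug-info transfer\n";
    }
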
diff --git a/lib/Transforms/IPO/LowerTypeTests.cpp b/lib/Transforms/IPO/LowerTypeTests.cpp
index 9fa5ed9ab2b8..6cef866b7b84 100644
--- a/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1401,7 +1401,7 @@ void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
FAlias->takeName(F);
if (FAlias->hasName())
F->setName(FAlias->getName() + ".cfi");
- F->replaceAllUsesWith(FAlias);
+ F->replaceUsesExceptBlockAddr(FAlias);
}
if (!F->isDeclarationForLinker())
F->setLinkage(GlobalValue::InternalLinkage);
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index b5267f75e417..c47d8b78df30 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -931,15 +931,17 @@ bool PartialInlinerImpl::tryPartialInline(FunctionCloner &Cloner) {
if (!shouldPartialInline(CS, Cloner, WeightedRcost, ORE))
continue;
- ORE.emit([&]() {
- return OptimizationRemark(DEBUG_TYPE, "PartiallyInlined",
- CS.getInstruction())
- << ore::NV("Callee", Cloner.OrigFunc) << " partially inlined into "
- << ore::NV("Caller", CS.getCaller());
- });
+ // Construct remark before doing the inlining, as after successful inlining
+ // the callsite is removed.
+ OptimizationRemark OR(DEBUG_TYPE, "PartiallyInlined", CS.getInstruction());
+ OR << ore::NV("Callee", Cloner.OrigFunc) << " partially inlined into "
+ << ore::NV("Caller", CS.getCaller());
InlineFunctionInfo IFI(nullptr, GetAssumptionCache, PSI);
- InlineFunction(CS, IFI);
+ if (!InlineFunction(CS, IFI))
+ continue;
+
+ ORE.emit(OR);
// Now update the entry count:
if (CalleeEntryCountV && CallSiteToProfCountMap.count(User)) {
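
The reordering above matters because InlineFunction erases the call instruction on success, so the remark text must be captured while CS.getInstruction() is still alive, and emitted only if inlining actually happened. A hedged sketch of that capture-then-invalidate ordering, with made-up stand-in types:

    #include <iostream>
    #include <memory>
    #include <string>

    struct Call { std::string Desc; };

    // Hypothetical stand-in for InlineFunction: may fail, and on success
    // destroys the call site it was given.
    bool inlineCall(std::unique_ptr<Call> &C, bool Succeed) {
      if (!Succeed)
        return false;
      C.reset(); // the call instruction is gone after a successful inline
      return true;
    }

    int main() {
      auto C = std::make_unique<Call>(Call{"foo partially inlined into bar"});
      std::string Remark = C->Desc; // capture before the call dies
      if (inlineCall(C, /*Succeed=*/true))
        std::cout << Remark << "\n"; // emit only on success
    }
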
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index 828eb5eee297..5d3736655093 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -467,6 +467,9 @@ void PassManagerBuilder::populateModulePassManager(
addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
+ if (OptLevel > 2)
+ MPM.add(createCallSiteSplittingPass());
+
MPM.add(createIPSCCPPass()); // IP SCCP
MPM.add(createCalledValuePropagationPass());
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
@@ -545,6 +548,9 @@ void PassManagerBuilder::populateModulePassManager(
// unrolling/vectorization/... now. We'll first run the inliner + CGSCC passes
// during ThinLTO and perform the rest of the optimizations afterward.
if (PrepareForThinLTO) {
+ // Ensure we perform any last passes, but do so before renaming anonymous
+ // globals in case the passes add any.
+ addExtensionsToPM(EP_OptimizerLast, MPM);
// Rename anon globals to be able to export them in the summary.
MPM.add(createNameAnonGlobalPass());
return;
@@ -703,6 +709,9 @@ void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
PM.add(createInferFunctionAttrsLegacyPass());
if (OptLevel > 1) {
+ // Split call-sites with more constrained arguments.
+ PM.add(createCallSiteSplittingPass());
+
// Indirect call promotion. This should promote all the targets that are
// left by the earlier promotion pass that promotes intra-module targets.
// This two-step promotion is to save the compile time. For LTO, it should
diff --git a/lib/Transforms/IPO/SampleProfile.cpp b/lib/Transforms/IPO/SampleProfile.cpp
index 34414f96ccae..8930e9b2b957 100644
--- a/lib/Transforms/IPO/SampleProfile.cpp
+++ b/lib/Transforms/IPO/SampleProfile.cpp
@@ -1182,24 +1182,20 @@ void SampleProfileLoader::buildEdges(Function &F) {
}
}
-/// Sorts the CallTargetMap \p M by count in descending order and stores the
-/// sorted result in \p Sorted. Returns the total counts.
-static uint64_t SortCallTargets(SmallVector<InstrProfValueData, 2> &Sorted,
- const SampleRecord::CallTargetMap &M) {
- Sorted.clear();
- uint64_t Sum = 0;
- for (auto I = M.begin(); I != M.end(); ++I) {
- Sum += I->getValue();
- Sorted.push_back({Function::getGUID(I->getKey()), I->getValue()});
- }
- std::sort(Sorted.begin(), Sorted.end(),
+/// Sorts the CallTargetMap \p M by count in descending order and returns it.
+static SmallVector<InstrProfValueData, 2> SortCallTargets(
+ const SampleRecord::CallTargetMap &M) {
+ SmallVector<InstrProfValueData, 2> R;
+ for (auto I = M.begin(); I != M.end(); ++I)
+ R.push_back({Function::getGUID(I->getKey()), I->getValue()});
+ std::sort(R.begin(), R.end(),
[](const InstrProfValueData &L, const InstrProfValueData &R) {
if (L.Count == R.Count)
return L.Value > R.Value;
else
return L.Count > R.Count;
});
- return Sum;
+ return R;
}
/// \brief Propagate weights into edges
@@ -1292,8 +1288,10 @@ void SampleProfileLoader::propagateWeights(Function &F) {
auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
if (!T || T.get().empty())
continue;
- SmallVector<InstrProfValueData, 2> SortedCallTargets;
- uint64_t Sum = SortCallTargets(SortedCallTargets, T.get());
+ SmallVector<InstrProfValueData, 2> SortedCallTargets =
+ SortCallTargets(T.get());
+ uint64_t Sum;
+ findIndirectCallFunctionSamples(I, Sum);
annotateValueSite(*I.getParent()->getParent()->getParent(), I,
SortedCallTargets, Sum, IPVK_IndirectCallTarget,
SortedCallTargets.size());
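
The comparator above orders call targets by descending count and breaks ties by descending value GUID, which keeps the value-site annotation deterministic. A self-contained C++ sketch of the same ordering, with InstrProfValueData reduced to a plain struct for illustration:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct ValueData { uint64_t Value, Count; };

    int main() {
      std::vector<ValueData> Targets = {{7, 10}, {3, 25}, {9, 10}};
      std::sort(Targets.begin(), Targets.end(),
                [](const ValueData &L, const ValueData &R) {
                  if (L.Count == R.Count)
                    return L.Value > R.Value; // deterministic tie-break
                  return L.Count > R.Count;   // hottest target first
                });
      for (const auto &T : Targets)
        std::cout << T.Value << " -> " << T.Count << "\n";
      // Prints 3->25, then 9->10 before 7->10 (tie broken by Value).
    }
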
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 18b246b5d99f..d28d615f47ea 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -482,7 +482,7 @@ Value *FAddCombine::performFactorization(Instruction *I) {
return nullptr;
FastMathFlags Flags;
- Flags.setUnsafeAlgebra();
+ Flags.setFast();
if (I0) Flags &= I->getFastMathFlags();
if (I1) Flags &= I->getFastMathFlags();
@@ -511,7 +511,7 @@ Value *FAddCombine::performFactorization(Instruction *I) {
}
Value *FAddCombine::simplify(Instruction *I) {
- assert(I->hasUnsafeAlgebra() && "Should be in unsafe mode");
+ assert(I->isFast() && "Expected 'fast' instruction");
// Currently we are not able to handle vector type.
if (I->getType()->isVectorTy())
@@ -1386,7 +1386,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
return replaceInstUsesWith(I, V);
- if (I.hasUnsafeAlgebra()) {
+ if (I.isFast()) {
if (Value *V = FAddCombine(Builder).simplify(&I))
return replaceInstUsesWith(I, V);
}
@@ -1736,7 +1736,7 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
return replaceInstUsesWith(I, V);
- if (I.hasUnsafeAlgebra()) {
+ if (I.isFast()) {
if (Value *V = FAddCombine(Builder).simplify(&I))
return replaceInstUsesWith(I, V);
}
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 7a4abc9aca0c..a00e6f73ab8c 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2017,7 +2017,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::fmuladd: {
// Canonicalize fast fmuladd to the separate fmul + fadd.
- if (II->hasUnsafeAlgebra()) {
+ if (II->isFast()) {
BuilderTy::FastMathFlagGuard Guard(Builder);
Builder.setFastMathFlags(II->getFastMathFlags());
Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
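
This hunk only splits the intrinsic into fmul + fadd when fast-math flags are set, because fma performs a single rounding while mul-then-add rounds twice. A small C++ demonstration of the discrepancy on a typical IEEE target (values chosen so the double rounding is visible):

    #include <cmath>
    #include <cstdio>

    int main() {
      float a = 1.0f + 0x1p-12f;            // 1 + 2^-12
      float split = a * a - 1.0f;           // two roundings: the 2^-24 term is lost
      float fused = std::fmaf(a, a, -1.0f); // single rounding keeps it
      std::printf("mul+add: %.9g\nfma:     %.9g\n", split, fused);
      // The two results differ, which is why the canonicalization is only
      // legal when the call carries fast-math flags.
    }
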
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index cb4788576c59..2974449830d9 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -426,8 +426,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
// Look for an appropriate type:
// - The type of Idx if the magic fits
- // - The smallest fitting legal type if we have a DataLayout
- // - Default to i32
+ // - The smallest fitting legal type
if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
Ty = Idx->getType();
else
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index e6b975382671..87666360c1a0 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -487,7 +487,7 @@ static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op);
if (!II)
return;
- if (II->getIntrinsicID() != Intrinsic::log2 || !II->hasUnsafeAlgebra())
+ if (II->getIntrinsicID() != Intrinsic::log2 || !II->isFast())
return;
Log2 = II;
@@ -498,7 +498,8 @@ static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
Instruction *I = dyn_cast<Instruction>(OpLog2Of);
if (!I)
return;
- if (I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
+
+ if (I->getOpcode() != Instruction::FMul || !I->isFast())
return;
if (match(I->getOperand(0), m_SpecificFP(0.5)))
@@ -601,7 +602,7 @@ Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, Constant *C,
}
if (R) {
- R->setHasUnsafeAlgebra(true);
+ R->setFast(true);
InsertNewInstWith(R, *InsertBefore);
}
@@ -622,7 +623,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
- bool AllowReassociate = I.hasUnsafeAlgebra();
+ bool AllowReassociate = I.isFast();
// Simplify mul instructions with a constant RHS.
if (isa<Constant>(Op1)) {
@@ -1341,7 +1342,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
if (Instruction *R = FoldOpIntoSelect(I, SI))
return R;
- bool AllowReassociate = I.hasUnsafeAlgebra();
+ bool AllowReassociate = I.isFast();
bool AllowReciprocal = I.hasAllowReciprocal();
if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
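
The detectLog2OfHalf helper used above looks for log2(0.5 * x) under fast-math, which reassociation can fold using the identity log2(0.5 * x) = log2(x) - 1. A quick standalone numerical check of that identity:

    #include <cmath>
    #include <cstdio>

    int main() {
      for (double x : {1.5, 8.0, 1234.5}) {
        double lhs = std::log2(0.5 * x);
        double rhs = std::log2(x) - 1.0;
        // Equal up to rounding, which fast-math licenses us to ignore.
        std::printf("x=%g: log2(0.5*x)=%.17g log2(x)-1=%.17g\n", x, lhs, rhs);
      }
    }
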
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 45541c9adc0e..44bbb84686ab 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -310,6 +310,40 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
}
}
+// If this is a bitwise operator or add with a constant RHS we might be able
+// to pull it through a shift.
+static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift,
+ BinaryOperator *BO,
+ const APInt &C) {
+ bool IsValid = true; // Valid only for And, Or, Xor
+ bool HighBitSet = false; // Transform if high bit of constant set?
+
+ switch (BO->getOpcode()) {
+ default: IsValid = false; break; // Do not perform transform!
+ case Instruction::Add:
+ IsValid = Shift.getOpcode() == Instruction::Shl;
+ break;
+ case Instruction::Or:
+ case Instruction::Xor:
+ HighBitSet = false;
+ break;
+ case Instruction::And:
+ HighBitSet = true;
+ break;
+ }
+
+ // If this is a signed shift right, and the high bit is modified
+ // by the logical operation, do not perform the transformation.
+ // The HighBitSet boolean indicates the value of the high bit of
+ // the constant which would cause it to be modified for this
+ // operation.
+ //
+ if (IsValid && Shift.getOpcode() == Instruction::AShr)
+ IsValid = C.isNegative() == HighBitSet;
+
+ return IsValid;
+}
+
Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
BinaryOperator &I) {
bool isLeftShift = I.getOpcode() == Instruction::Shl;
@@ -472,33 +506,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
// shift is the only use, we can pull it out of the shift.
const APInt *Op0C;
if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
- bool isValid = true; // Valid only for And, Or, Xor
- bool highBitSet = false; // Transform if high bit of constant set?
-
- switch (Op0BO->getOpcode()) {
- default: isValid = false; break; // Do not perform transform!
- case Instruction::Add:
- isValid = isLeftShift;
- break;
- case Instruction::Or:
- case Instruction::Xor:
- highBitSet = false;
- break;
- case Instruction::And:
- highBitSet = true;
- break;
- }
-
- // If this is a signed shift right, and the high bit is modified
- // by the logical operation, do not perform the transformation.
- // The highBitSet boolean indicates the value of the high bit of
- // the constant which would cause it to be modified for this
- // operation.
- //
- if (isValid && I.getOpcode() == Instruction::AShr)
- isValid = Op0C->isNegative() == highBitSet;
-
- if (isValid) {
+ if (canShiftBinOpWithConstantRHS(I, Op0BO, *Op0C)) {
Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
cast<Constant>(Op0BO->getOperand(1)), Op1);
@@ -525,6 +533,53 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
return BinaryOperator::CreateSub(NewRHS, NewShift);
}
}
+
+ // If we have a select that conditionally executes some binary operator,
+ // see if we can pull the select and operator through the shift.
+ //
+ // For example, turning:
+ // shl (select C, (add X, C1), X), C2
+ // Into:
+ // Y = shl X, C2
+ // select C, (add Y, C1 << C2), Y
+ Value *Cond;
+ BinaryOperator *TBO;
+ Value *FalseVal;
+ if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
+ m_Value(FalseVal)))) {
+ const APInt *C;
+ if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
+ match(TBO->getOperand(1), m_APInt(C)) &&
+ canShiftBinOpWithConstantRHS(I, TBO, *C)) {
+ Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
+ cast<Constant>(TBO->getOperand(1)), Op1);
+
+ Value *NewShift =
+ Builder.CreateBinOp(I.getOpcode(), FalseVal, Op1);
+ Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift,
+ NewRHS);
+ return SelectInst::Create(Cond, NewOp, NewShift);
+ }
+ }
+
+ BinaryOperator *FBO;
+ Value *TrueVal;
+ if (match(Op0, m_Select(m_Value(Cond), m_Value(TrueVal),
+ m_OneUse(m_BinOp(FBO))))) {
+ const APInt *C;
+ if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
+ match(FBO->getOperand(1), m_APInt(C)) &&
+ canShiftBinOpWithConstantRHS(I, FBO, *C)) {
+ Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
+ cast<Constant>(FBO->getOperand(1)), Op1);
+
+ Value *NewShift =
+ Builder.CreateBinOp(I.getOpcode(), TrueVal, Op1);
+ Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift,
+ NewRHS);
+ return SelectInst::Create(Cond, NewShift, NewOp);
+ }
+ }
}
return nullptr;
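
The new select transform rewrites shl (select C, (add X, C1), X), C2 into a single shift of X followed by a select over the shift-adjusted constant, relying on left shift distributing over addition modulo 2^n. A standalone C++ check of the equivalence on sample inputs (function names are illustrative only):

    #include <cassert>
    #include <cstdint>

    // Original form: shl (select c, (add x, C1), x), C2
    uint32_t before(bool c, uint32_t x) {
      return (c ? x + 3u : x) << 4;
    }

    // Transformed form: y = shl x, C2; select c, (add y, C1 << C2), y
    uint32_t after(bool c, uint32_t x) {
      uint32_t y = x << 4;
      return c ? y + (3u << 4) : y;
    }

    int main() {
      for (uint32_t x = 0; x < 100000; ++x) {
        assert(before(true, x) == after(true, x));
        assert(before(false, x) == after(false, x));
      }
    }
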
diff --git a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 11a43e803a99..c92d48396c84 100644
--- a/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -844,8 +844,9 @@ public:
PGOUseFunc(Function &Func, Module *Modu,
std::unordered_multimap<Comdat *, GlobalValue *> &ComdatMembers,
BranchProbabilityInfo *BPI = nullptr,
- BlockFrequencyInfo *BFI = nullptr)
- : F(Func), M(Modu), FuncInfo(Func, ComdatMembers, false, BPI, BFI),
+ BlockFrequencyInfo *BFIin = nullptr)
+ : F(Func), M(Modu), BFI(BFIin),
+ FuncInfo(Func, ComdatMembers, false, BPI, BFIin),
FreqAttr(FFA_Normal) {}
// Read counts for the instrumented BB from profile.
@@ -863,6 +864,9 @@ public:
// Annotate the value profile call sites for one value kind.
void annotateValueSites(uint32_t Kind);
+ // Annotate the irreducible loop header weights.
+ void annotateIrrLoopHeaderWeights();
+
// The hotness of the function from the profile count.
enum FuncFreqAttr { FFA_Normal, FFA_Cold, FFA_Hot };
@@ -894,6 +898,7 @@ public:
private:
Function &F;
Module *M;
+ BlockFrequencyInfo *BFI;
// This member stores the shared information with class PGOGenFunc.
FuncPGOInstrumentation<PGOUseEdge, UseBBInfo> FuncInfo;
@@ -1183,6 +1188,18 @@ void PGOUseFunc::setBranchWeights() {
}
}
+void PGOUseFunc::annotateIrrLoopHeaderWeights() {
+ DEBUG(dbgs() << "\nAnnotating irreducible loop header weights.\n");
+ // Find irr loop headers
+ for (auto &BB : F) {
+ if (BFI->isIrrLoopHeader(&BB)) {
+ TerminatorInst *TI = BB.getTerminator();
+ const UseBBInfo &BBCountInfo = getBBInfo(&BB);
+ setIrrLoopHeaderMetadata(M, TI, BBCountInfo.CountValue);
+ }
+ }
+}
+
void SelectInstVisitor::instrumentOneSelectInst(SelectInst &SI) {
Module *M = F.getParent();
IRBuilder<> Builder(&SI);
@@ -1441,6 +1458,7 @@ static bool annotateAllFunctions(
Func.populateCounters();
Func.setBranchWeights();
Func.annotateValueSites();
+ Func.annotateIrrLoopHeaderWeights();
PGOUseFunc::FuncFreqAttr FreqAttr = Func.getFuncFreqAttr();
if (FreqAttr == PGOUseFunc::FFA_Cold)
ColdFunctions.push_back(&F);
@@ -1582,6 +1600,12 @@ void llvm::setProfMetadata(Module *M, Instruction *TI,
namespace llvm {
+void setIrrLoopHeaderMetadata(Module *M, Instruction *TI, uint64_t Count) {
+ MDBuilder MDB(M->getContext());
+ TI->setMetadata(llvm::LLVMContext::MD_irr_loop,
+ MDB.createIrrLoopHeaderWeight(Count));
+}
+
template <> struct GraphTraits<PGOUseFunc *> {
using NodeRef = const BasicBlock *;
using ChildIteratorType = succ_const_iterator;
diff --git a/lib/Transforms/Scalar/ADCE.cpp b/lib/Transforms/Scalar/ADCE.cpp
index f04d0f05ffc7..1e683db50206 100644
--- a/lib/Transforms/Scalar/ADCE.cpp
+++ b/lib/Transforms/Scalar/ADCE.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -118,7 +119,8 @@ class AggressiveDeadCodeElimination {
PostDominatorTree &PDT;
/// Mapping of blocks to associated information, an element in BlockInfoVec.
- DenseMap<BasicBlock *, BlockInfoType> BlockInfo;
+ /// Use MapVector to get deterministic iteration order.
+ MapVector<BasicBlock *, BlockInfoType> BlockInfo;
bool isLive(BasicBlock *BB) { return BlockInfo[BB].Live; }
/// Mapping of instructions to associated information.
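
MapVector pairs a hashed index with a vector of entries, so iteration follows insertion order rather than hash order — exactly the nondeterminism this hunk removes. A minimal sketch of the idea using only standard containers (a stand-in for illustration, not LLVM's ADT):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Insertion-ordered map: lookups go through the hash map, iteration
    // walks the vector, so traversal order is deterministic.
    template <typename K, typename V> class OrderedMap {
      std::unordered_map<K, size_t> Index;
      std::vector<std::pair<K, V>> Entries;

    public:
      V &operator[](const K &Key) {
        auto It = Index.find(Key);
        if (It == Index.end()) {
          Index.emplace(Key, Entries.size());
          Entries.emplace_back(Key, V{});
          return Entries.back().second;
        }
        return Entries[It->second].second;
      }
      auto begin() { return Entries.begin(); }
      auto end() { return Entries.end(); }
    };

    int main() {
      OrderedMap<std::string, int> BlockInfo;
      BlockInfo["entry"] = 1;
      BlockInfo["loop"] = 2;
      BlockInfo["exit"] = 3;
      for (auto &KV : BlockInfo) // always entry, loop, exit
        std::cout << KV.first << " -> " << KV.second << "\n";
    }
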
diff --git a/lib/Transforms/Scalar/CMakeLists.txt b/lib/Transforms/Scalar/CMakeLists.txt
index d79ae851005d..6a27fbca8b78 100644
--- a/lib/Transforms/Scalar/CMakeLists.txt
+++ b/lib/Transforms/Scalar/CMakeLists.txt
@@ -2,6 +2,7 @@ add_llvm_library(LLVMScalarOpts
ADCE.cpp
AlignmentFromAssumptions.cpp
BDCE.cpp
+ CallSiteSplitting.cpp
ConstantHoisting.cpp
ConstantProp.cpp
CorrelatedValuePropagation.cpp
diff --git a/lib/Transforms/Scalar/CallSiteSplitting.cpp b/lib/Transforms/Scalar/CallSiteSplitting.cpp
new file mode 100644
index 000000000000..b70ed8d7d4cd
--- /dev/null
+++ b/lib/Transforms/Scalar/CallSiteSplitting.cpp
@@ -0,0 +1,492 @@
+//===- CallSiteSplitting.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a transformation that tries to split a call-site to pass
+// more constrained arguments if its argument is predicated in the control flow
+// so that we can expose better context to the later passes (e.g., inliner,
+// jump threading, or IPA-CP based function cloning, etc.).
+// As of now we support two cases:
+//
+// 1) If a call site is dominated by an OR condition and if any of its arguments
+// are predicated on this OR condition, try to split the condition with more
+// constrained arguments. For example, in the code below, we try to split the
+// call site since we can predicate the argument(ptr) based on the OR condition.
+//
+// Split from :
+// if (!ptr || c)
+// callee(ptr);
+// to :
+// if (!ptr)
+// callee(null) // set the known constant value
+// else if (c)
+// callee(nonnull ptr) // set non-null attribute in the argument
+//
+// 2) We can also split a call-site based on constant incoming values of a PHI
+// For example,
+// from :
+// Header:
+// %c = icmp eq i32 %i1, %i2
+// br i1 %c, label %Tail, label %TBB
+// TBB:
+// br label %Tail
+// Tail:
+// %p = phi i32 [ 0, %Header], [ 1, %TBB]
+// call void @bar(i32 %p)
+// to
+// Header:
+// %c = icmp eq i32 %i1, %i2
+// br i1 %c, label %Tail-split0, label %TBB
+// TBB:
+// br label %Tail-split1
+// Tail-split0:
+// call void @bar(i32 0)
+// br label %Tail
+// Tail-split1:
+// call void @bar(i32 1)
+// br label %Tail
+// Tail:
+// %p = phi i32 [ 0, %Tail-split0 ], [ 1, %Tail-split1 ]
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/CallSiteSplitting.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+
+using namespace llvm;
+using namespace PatternMatch;
+
+#define DEBUG_TYPE "callsite-splitting"
+
+STATISTIC(NumCallSiteSplit, "Number of call-sites split");
+
+static void addNonNullAttribute(Instruction *CallI, Instruction *&NewCallI,
+ Value *Op) {
+ if (!NewCallI)
+ NewCallI = CallI->clone();
+ CallSite CS(NewCallI);
+ unsigned ArgNo = 0;
+ for (auto &I : CS.args()) {
+ if (&*I == Op)
+ CS.addParamAttr(ArgNo, Attribute::NonNull);
+ ++ArgNo;
+ }
+}
+
+static void setConstantInArgument(Instruction *CallI, Instruction *&NewCallI,
+ Value *Op, Constant *ConstValue) {
+ if (!NewCallI)
+ NewCallI = CallI->clone();
+ CallSite CS(NewCallI);
+ unsigned ArgNo = 0;
+ for (auto &I : CS.args()) {
+ if (&*I == Op)
+ CS.setArgument(ArgNo, ConstValue);
+ ++ArgNo;
+ }
+}
+
+static bool createCallSitesOnOrPredicatedArgument(
+ CallSite CS, Instruction *&NewCSTakenFromHeader,
+ Instruction *&NewCSTakenFromNextCond,
+ SmallVectorImpl<BranchInst *> &BranchInsts, BasicBlock *HeaderBB) {
+ assert(BranchInsts.size() <= 2 &&
+ "Unexpected number of blocks in the OR predicated condition");
+ Instruction *Instr = CS.getInstruction();
+ BasicBlock *CallSiteBB = Instr->getParent();
+ TerminatorInst *HeaderTI = HeaderBB->getTerminator();
+ bool IsCSInTakenPath = CallSiteBB == HeaderTI->getSuccessor(0);
+
+ for (unsigned I = 0, E = BranchInsts.size(); I != E; ++I) {
+ BranchInst *PBI = BranchInsts[I];
+ assert(isa<ICmpInst>(PBI->getCondition()) &&
+ "Unexpected condition in a conditional branch.");
+ ICmpInst *Cmp = cast<ICmpInst>(PBI->getCondition());
+ Value *Arg = Cmp->getOperand(0);
+ assert(isa<Constant>(Cmp->getOperand(1)) &&
+ "Expected op1 to be a constant.");
+ Constant *ConstVal = cast<Constant>(Cmp->getOperand(1));
+ CmpInst::Predicate Pred = Cmp->getPredicate();
+
+ if (PBI->getParent() == HeaderBB) {
+ Instruction *&CallTakenFromHeader =
+ IsCSInTakenPath ? NewCSTakenFromHeader : NewCSTakenFromNextCond;
+ Instruction *&CallUntakenFromHeader =
+ IsCSInTakenPath ? NewCSTakenFromNextCond : NewCSTakenFromHeader;
+
+ assert((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
+ "Unexpected predicate in an OR condition");
+
+ // Set the constant value for arguments in the call predicated based on
+ // the OR condition.
+ Instruction *&CallToSetConst = Pred == ICmpInst::ICMP_EQ
+ ? CallTakenFromHeader
+ : CallUntakenFromHeader;
+ setConstantInArgument(Instr, CallToSetConst, Arg, ConstVal);
+
+ // Add the NonNull attribute if compared with the null pointer.
+ if (ConstVal->getType()->isPointerTy() && ConstVal->isNullValue()) {
+ Instruction *&CallToSetAttr = Pred == ICmpInst::ICMP_EQ
+ ? CallUntakenFromHeader
+ : CallTakenFromHeader;
+ addNonNullAttribute(Instr, CallToSetAttr, Arg);
+ }
+ continue;
+ }
+
+ if (Pred == ICmpInst::ICMP_EQ) {
+ if (PBI->getSuccessor(0) == Instr->getParent()) {
+ // Set the constant value for the call taken from the second block in
+ // the OR condition.
+ setConstantInArgument(Instr, NewCSTakenFromNextCond, Arg, ConstVal);
+ } else {
+ // Add the NonNull attribute if compared with the null pointer for the
+ // call taken from the second block in the OR condition.
+ if (ConstVal->getType()->isPointerTy() && ConstVal->isNullValue())
+ addNonNullAttribute(Instr, NewCSTakenFromNextCond, Arg);
+ }
+ } else {
+ if (PBI->getSuccessor(0) == Instr->getParent()) {
+ // Add the NonNull attribute if compared with the null pointer for the
+ // call taken from the second block in the OR condition.
+ if (ConstVal->getType()->isPointerTy() && ConstVal->isNullValue())
+ addNonNullAttribute(Instr, NewCSTakenFromNextCond, Arg);
+ } else if (Pred == ICmpInst::ICMP_NE) {
+ // Set the constant value for the call in the untaken path from the
+ // header block.
+ setConstantInArgument(Instr, NewCSTakenFromNextCond, Arg, ConstVal);
+ } else
+ llvm_unreachable("Unexpected condition");
+ }
+ }
+ return NewCSTakenFromHeader || NewCSTakenFromNextCond;
+}
+
+static bool canSplitCallSite(CallSite CS) {
+ // FIXME: As of now we handle only CallInst. InvokeInst could be handled
+ // without too much effort.
+ Instruction *Instr = CS.getInstruction();
+ if (!isa<CallInst>(Instr))
+ return false;
+
+ // Allow splitting a call-site only when there is no instruction before the
+ // call-site in the basic block. Based on this constraint, we only clone the
+ // call instruction, and we do not move a call-site across any other
+ // instruction.
+ BasicBlock *CallSiteBB = Instr->getParent();
+ if (Instr != CallSiteBB->getFirstNonPHI())
+ return false;
+
+ pred_iterator PII = pred_begin(CallSiteBB);
+ pred_iterator PIE = pred_end(CallSiteBB);
+ unsigned NumPreds = std::distance(PII, PIE);
+
+ // Allow only one extra call-site, i.e. the block must have exactly two
+ // predecessors.
+ if (NumPreds != 2)
+ return false;
+
+ // Cannot split an edge from an IndirectBrInst.
+ BasicBlock *Preds[2] = {*PII++, *PII};
+ if (isa<IndirectBrInst>(Preds[0]->getTerminator()) ||
+ isa<IndirectBrInst>(Preds[1]->getTerminator()))
+ return false;
+
+ return CallSiteBB->canSplitPredecessors();
+}
+
+/// Return true if the CS is split into its new predecessors which are directly
+/// hooked to each of its original predecessors pointed to by PredBB1 and
+/// PredBB2. Note that PredBB1 and PredBB2 are decided in
+/// findPredicatedArgument(), especially for the OR predicated case where
+/// PredBB1 will point to the header, and PredBB2 will point to the second
+/// compare block. CallInst1 and CallInst2 will be the new call-sites placed
+/// in the new predecessors split for PredBB1 and PredBB2, respectively.
+/// Therefore, CallInst1 will be the call-site placed
+/// between Header and Tail, and CallInst2 will be the call-site between TBB and
+/// Tail. For example, in the IR below with an OR condition, the call-site can
+/// be split
+///
+/// from :
+///
+/// Header:
+/// %c = icmp eq i32* %a, null
+/// br i1 %c %Tail, %TBB
+/// TBB:
+/// %c2 = icmp eq i32* %b, null
+/// br i1 %c %Tail, %End
+/// Tail:
+/// %ca = call i1 @callee (i32* %a, i32* %b)
+///
+/// to :
+///
+/// Header: // PredBB1 is Header
+/// %c = icmp eq i32* %a, null
+/// br i1 %c %Tail-split1, %TBB
+/// TBB: // PredBB2 is TBB
+/// %c2 = icmp eq i32* %b, null
+/// br i1 %c %Tail-split2, %End
+/// Tail-split1:
+/// %ca1 = call @callee (i32* null, i32* %b) // CallInst1
+/// br %Tail
+/// Tail-split2:
+/// %ca2 = call @callee (i32* nonnull %a, i32* null) // CallInst2
+/// br %Tail
+/// Tail:
+/// %p = phi i1 [%ca1, %Tail-split1],[%ca2, %Tail-split2]
+///
+/// Note that for an OR predicated case, CallInst1 and CallInst2 should be
+/// created with more constrained arguments in
+/// createCallSitesOnOrPredicatedArgument().
+static void splitCallSite(CallSite CS, BasicBlock *PredBB1, BasicBlock *PredBB2,
+ Instruction *CallInst1, Instruction *CallInst2) {
+ Instruction *Instr = CS.getInstruction();
+ BasicBlock *TailBB = Instr->getParent();
+ assert(Instr == (TailBB->getFirstNonPHI()) && "Unexpected call-site");
+
+ BasicBlock *SplitBlock1 =
+ SplitBlockPredecessors(TailBB, PredBB1, ".predBB1.split");
+ BasicBlock *SplitBlock2 =
+ SplitBlockPredecessors(TailBB, PredBB2, ".predBB2.split");
+
+ assert((SplitBlock1 && SplitBlock2) && "Unexpected new basic block split.");
+
+ if (!CallInst1)
+ CallInst1 = Instr->clone();
+ if (!CallInst2)
+ CallInst2 = Instr->clone();
+
+ CallInst1->insertBefore(&*SplitBlock1->getFirstInsertionPt());
+ CallInst2->insertBefore(&*SplitBlock2->getFirstInsertionPt());
+
+ CallSite CS1(CallInst1);
+ CallSite CS2(CallInst2);
+
+ // Handle PHIs used as arguments in the call-site.
+ for (auto &PI : *TailBB) {
+ PHINode *PN = dyn_cast<PHINode>(&PI);
+ if (!PN)
+ break;
+ unsigned ArgNo = 0;
+ for (auto &CI : CS.args()) {
+ if (&*CI == PN) {
+ CS1.setArgument(ArgNo, PN->getIncomingValueForBlock(SplitBlock1));
+ CS2.setArgument(ArgNo, PN->getIncomingValueForBlock(SplitBlock2));
+ }
+ ++ArgNo;
+ }
+ }
+
+ // Replace users of the original call with a PHI merging the split call-sites.
+ if (Instr->getNumUses()) {
+ PHINode *PN = PHINode::Create(Instr->getType(), 2, "phi.call", Instr);
+ PN->addIncoming(CallInst1, SplitBlock1);
+ PN->addIncoming(CallInst2, SplitBlock2);
+ Instr->replaceAllUsesWith(PN);
+ }
+ DEBUG(dbgs() << "split call-site : " << *Instr << " into \n");
+ DEBUG(dbgs() << " " << *CallInst1 << " in " << SplitBlock1->getName()
+ << "\n");
+ DEBUG(dbgs() << " " << *CallInst2 << " in " << SplitBlock2->getName()
+ << "\n");
+ Instr->eraseFromParent();
+ NumCallSiteSplit++;
+}
+
+static bool isCondRelevantToAnyCallArgument(ICmpInst *Cmp, CallSite CS) {
+ assert(isa<Constant>(Cmp->getOperand(1)) && "Expected a constant operand.");
+ Value *Op0 = Cmp->getOperand(0);
+ unsigned ArgNo = 0;
+ for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E;
+ ++I, ++ArgNo) {
+ // Don't consider constant or arguments that are already known non-null.
+ if (isa<Constant>(*I) || CS.paramHasAttr(ArgNo, Attribute::NonNull))
+ continue;
+
+ if (*I == Op0)
+ return true;
+ }
+ return false;
+}
+
+static void findOrCondRelevantToCallArgument(
+ CallSite CS, BasicBlock *PredBB, BasicBlock *OtherPredBB,
+ SmallVectorImpl<BranchInst *> &BranchInsts, BasicBlock *&HeaderBB) {
+ auto *PBI = dyn_cast<BranchInst>(PredBB->getTerminator());
+ if (!PBI || !PBI->isConditional())
+ return;
+
+ if (PBI->getSuccessor(0) == OtherPredBB ||
+ PBI->getSuccessor(1) == OtherPredBB)
+ if (PredBB == OtherPredBB->getSinglePredecessor()) {
+ assert(!HeaderBB && "Expect to find only a single header block");
+ HeaderBB = PredBB;
+ }
+
+ CmpInst::Predicate Pred;
+ Value *Cond = PBI->getCondition();
+ if (!match(Cond, m_ICmp(Pred, m_Value(), m_Constant())))
+ return;
+ ICmpInst *Cmp = cast<ICmpInst>(Cond);
+ if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)
+ if (isCondRelevantToAnyCallArgument(Cmp, CS))
+ BranchInsts.push_back(PBI);
+}
+
+// Return true if the call-site has an argument which is a PHI with only
+// constant incoming values.
+static bool isPredicatedOnPHI(CallSite CS) {
+ Instruction *Instr = CS.getInstruction();
+ BasicBlock *Parent = Instr->getParent();
+ if (Instr != Parent->getFirstNonPHI())
+ return false;
+
+ for (auto &BI : *Parent) {
+ if (PHINode *PN = dyn_cast<PHINode>(&BI)) {
+ for (auto &I : CS.args())
+ if (&*I == PN) {
+ assert(PN->getNumIncomingValues() == 2 &&
+ "Unexpected number of incoming values");
+ if (PN->getIncomingBlock(0) == PN->getIncomingBlock(1))
+ return false;
+ if (PN->getIncomingValue(0) == PN->getIncomingValue(1))
+ continue;
+ if (isa<Constant>(PN->getIncomingValue(0)) &&
+ isa<Constant>(PN->getIncomingValue(1)))
+ return true;
+ }
+ }
+ break;
+ }
+ return false;
+}
+
+// Return true if an argument in CS is predicated on an 'or' condition.
+// Create new call-site with arguments constrained based on the OR condition.
+static bool findPredicatedOnOrCondition(CallSite CS, BasicBlock *PredBB1,
+ BasicBlock *PredBB2,
+ Instruction *&NewCallTakenFromHeader,
+ Instruction *&NewCallTakenFromNextCond,
+ BasicBlock *&HeaderBB) {
+ SmallVector<BranchInst *, 4> BranchInsts;
+ findOrCondRelevantToCallArgument(CS, PredBB1, PredBB2, BranchInsts, HeaderBB);
+ findOrCondRelevantToCallArgument(CS, PredBB2, PredBB1, BranchInsts, HeaderBB);
+ if (BranchInsts.empty() || !HeaderBB)
+ return false;
+
+ // If an OR condition is detected, try to create call sites with constrained
+ // arguments (e.g., NonNull attribute or constant value).
+ return createCallSitesOnOrPredicatedArgument(CS, NewCallTakenFromHeader,
+ NewCallTakenFromNextCond,
+ BranchInsts, HeaderBB);
+}
+
+static bool findPredicatedArgument(CallSite CS, Instruction *&CallInst1,
+ Instruction *&CallInst2,
+ BasicBlock *&PredBB1, BasicBlock *&PredBB2) {
+ BasicBlock *CallSiteBB = CS.getInstruction()->getParent();
+ pred_iterator PII = pred_begin(CallSiteBB);
+ pred_iterator PIE = pred_end(CallSiteBB);
+ assert(std::distance(PII, PIE) == 2 && "Expect only two predecessors.");
+ (void)PIE;
+ BasicBlock *Preds[2] = {*PII++, *PII};
+ BasicBlock *&HeaderBB = PredBB1;
+ if (!findPredicatedOnOrCondition(CS, Preds[0], Preds[1], CallInst1, CallInst2,
+ HeaderBB) &&
+ !isPredicatedOnPHI(CS))
+ return false;
+
+ if (!PredBB1)
+ PredBB1 = Preds[0];
+
+ PredBB2 = PredBB1 == Preds[0] ? Preds[1] : Preds[0];
+ return true;
+}
+
+static bool tryToSplitCallSite(CallSite CS) {
+ if (!CS.arg_size())
+ return false;
+
+ BasicBlock *PredBB1 = nullptr;
+ BasicBlock *PredBB2 = nullptr;
+ Instruction *CallInst1 = nullptr;
+ Instruction *CallInst2 = nullptr;
+ if (!canSplitCallSite(CS) ||
+ !findPredicatedArgument(CS, CallInst1, CallInst2, PredBB1, PredBB2)) {
+ assert(!CallInst1 && !CallInst2 && "Unexpected new call-sites cloned.");
+ return false;
+ }
+ splitCallSite(CS, PredBB1, PredBB2, CallInst1, CallInst2);
+ return true;
+}
+
+static bool doCallSiteSplitting(Function &F, TargetLibraryInfo &TLI) {
+ bool Changed = false;
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE;) {
+ BasicBlock &BB = *BI++;
+ for (BasicBlock::iterator II = BB.begin(), IE = BB.end(); II != IE;) {
+ Instruction *I = &*II++;
+ CallSite CS(cast<Value>(I));
+ if (!CS || isa<IntrinsicInst>(I) || isInstructionTriviallyDead(I, &TLI))
+ continue;
+
+ Function *Callee = CS.getCalledFunction();
+ if (!Callee || Callee->isDeclaration())
+ continue;
+ Changed |= tryToSplitCallSite(CS);
+ }
+ }
+ return Changed;
+}
+
+namespace {
+struct CallSiteSplittingLegacyPass : public FunctionPass {
+ static char ID;
+ CallSiteSplittingLegacyPass() : FunctionPass(ID) {
+ initializeCallSiteSplittingLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (skipFunction(F))
+ return false;
+
+ auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ return doCallSiteSplitting(F, TLI);
+ }
+};
+} // namespace
+
+char CallSiteSplittingLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(CallSiteSplittingLegacyPass, "callsite-splitting",
+ "Call-site splitting", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(CallSiteSplittingLegacyPass, "callsite-splitting",
+ "Call-site splitting", false, false)
+FunctionPass *llvm::createCallSiteSplittingPass() {
+ return new CallSiteSplittingLegacyPass();
+}
+
+PreservedAnalyses CallSiteSplittingPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
+
+ if (!doCallSiteSplitting(F, TLI))
+ return PreservedAnalyses::all();
+ PreservedAnalyses PA;
+ return PA;
+}
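
At the source level, the OR-predicated case this new pass handles corresponds to hand-splitting a guarded call so each path passes the stronger fact it knows. A hedged C++ illustration of the before/after shape (the callee and predicate are made up):

    #include <cstdio>

    void callee(int *p) { std::printf("p=%p\n", (void *)p); }

    // Before: one call-site, argument facts hidden behind the OR.
    void before(int *ptr, bool c) {
      if (!ptr || c)
        callee(ptr);
    }

    // After: two call-sites, each with a more constrained argument --
    // a known-null constant on one path, a provably non-null pointer on
    // the other, which later passes (inliner, IPSCCP) can exploit.
    void after(int *ptr, bool c) {
      if (!ptr)
        callee(nullptr);  // constant argument
      else if (c)
        callee(ptr);      // ptr is known non-null here
    }

    int main() {
      int x = 0;
      before(&x, true);
      after(&x, true);
    }
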
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 9ce42a068256..abb50f27f1cc 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -48,6 +48,7 @@
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
@@ -1624,6 +1625,15 @@ PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
if (DU.NarrowDef->use_empty())
DeadInsts.emplace_back(DU.NarrowDef);
}
+
+ // Attach any debug information to the new PHI. Since OrigPhi and WidePHI
+ // evaluate the same recurrence, we can just copy the debug info over.
+ SmallVector<DbgValueInst *, 1> DbgValues;
+ llvm::findDbgValues(DbgValues, OrigPhi);
+ auto *MDPhi = MetadataAsValue::get(WidePhi->getContext(),
+ ValueAsMetadata::get(WidePhi));
+ for (auto &DbgValue : DbgValues)
+ DbgValue->setOperand(0, MDPhi);
return WidePhi;
}
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index ade4fbbcb6f2..e6cab3f34cf0 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -192,11 +192,12 @@ JumpThreadingPass::JumpThreadingPass(int T) {
// P(cond == true ) = P(A) + P(cond == true | B) * P(B)
//
// which gives us:
-// P(A) <= P(c == true), i.e.
+// P(A) is less than P(cond == true), i.e.
// P(t == true) <= P(cond == true)
//
-// In other words, if we know P(cond == true), we know that P(t == true)
-// can not be greater than 1%.
+// In other words, if we know P(cond == true) is unlikely, we know
+// that P(t == true) is also unlikely.
+//
static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) {
BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
if (!CondBr)
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 6ca8d602302b..c60ec9f50f7a 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -62,6 +62,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -93,9 +94,8 @@ static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop,
static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
const LoopSafetyInfo *SafetyInfo,
OptimizationRemarkEmitter *ORE);
-static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
- const Loop *CurLoop, AliasSetTracker *CurAST,
- const LoopSafetyInfo *SafetyInfo,
+static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
+ const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
const DominatorTree *DT,
@@ -394,8 +394,12 @@ bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
//
if (isNotUsedInLoop(I, CurLoop, SafetyInfo) &&
canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo, ORE)) {
- ++II;
- Changed |= sink(I, LI, DT, CurLoop, CurAST, SafetyInfo, ORE);
+ if (sink(I, LI, DT, CurLoop, SafetyInfo, ORE)) {
+ ++II;
+ CurAST->deleteValue(&I);
+ I.eraseFromParent();
+ Changed = true;
+ }
}
}
}
@@ -717,26 +721,6 @@ static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop,
if (!BlockColors.empty() &&
BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
return false;
-
- // A PHI node where all of the incoming values are this instruction are
- // special -- they can just be RAUW'ed with the instruction and thus
- // don't require a use in the predecessor. This is a particular important
- // special case because it is the pattern found in LCSSA form.
- if (isTriviallyReplacablePHI(*PN, I)) {
- if (CurLoop->contains(PN))
- return false;
- else
- continue;
- }
-
- // Otherwise, PHI node uses occur in predecessor blocks if the incoming
- // values. Check for such a use being inside the loop.
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == &I)
- if (CurLoop->contains(PN->getIncomingBlock(i)))
- return false;
-
- continue;
}
if (CurLoop->contains(UI))
@@ -806,14 +790,96 @@ CloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN,
return New;
}
+static Instruction *sinkThroughTriviallyReplacablePHI(
+ PHINode *TPN, Instruction *I, LoopInfo *LI,
+ SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
+ const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop) {
+ assert(isTriviallyReplacablePHI(*TPN, *I) &&
+ "Expect only trivially replacalbe PHI");
+ BasicBlock *ExitBlock = TPN->getParent();
+ Instruction *New;
+ auto It = SunkCopies.find(ExitBlock);
+ if (It != SunkCopies.end())
+ New = It->second;
+ else
+ New = SunkCopies[ExitBlock] =
+ CloneInstructionInExitBlock(*I, *ExitBlock, *TPN, LI, SafetyInfo);
+ return New;
+}
+
+static bool canSplitPredecessors(PHINode *PN) {
+ BasicBlock *BB = PN->getParent();
+ if (!BB->canSplitPredecessors())
+ return false;
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *BBPred = *PI;
+ if (isa<IndirectBrInst>(BBPred->getTerminator()))
+ return false;
+ }
+ return true;
+}
+
+static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
+ LoopInfo *LI, const Loop *CurLoop) {
+#ifndef NDEBUG
+ SmallVector<BasicBlock *, 32> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+ SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
+ ExitBlocks.end());
+#endif
+ BasicBlock *ExitBB = PN->getParent();
+ assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
+
+ // Split predecessors of the loop exit so that instructions in the loop are
+ // exposed to exit blocks through trivially replacable PHIs while keeping the
+ // loop in the canonical form where each predecessor of each exit block is
+ // contained within the loop. For example, this will convert the loop below
+ // from
+ //
+ // LB1:
+ // %v1 =
+ // br %LE, %LB2
+ // LB2:
+ // %v2 =
+ // br %LE, %LB1
+ // LE:
+ // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replacable
+ //
+ // to
+ //
+ // LB1:
+ // %v1 =
+ // br %LE.split, %LB2
+ // LB2:
+ // %v2 =
+ // br %LE.split2, %LB1
+ // LE.split:
+ // %p1 = phi [%v1, %LB1] <-- trivially replacable
+ // br %LE
+ // LE.split2:
+ // %p2 = phi [%v2, %LB2] <-- trivially replacable
+ // br %LE
+ // LE:
+ // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
+ //
+ SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
+ while (!PredBBs.empty()) {
+ BasicBlock *PredBB = *PredBBs.begin();
+ assert(CurLoop->contains(PredBB) &&
+ "Expect all predecessors are in the loop");
+ if (PN->getBasicBlockIndex(PredBB) >= 0)
+ SplitBlockPredecessors(ExitBB, PredBB, ".split.loop.exit", DT, LI, true);
+ PredBBs.remove(PredBB);
+ }
+}
+
/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
-static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
- const Loop *CurLoop, AliasSetTracker *CurAST,
- const LoopSafetyInfo *SafetyInfo,
+static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
+ const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
OptimizationRemarkEmitter *ORE) {
DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
ORE->emit([&]() {
@@ -828,57 +894,75 @@ static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
++NumSunk;
Changed = true;
-#ifndef NDEBUG
- SmallVector<BasicBlock *, 32> ExitBlocks;
- CurLoop->getUniqueExitBlocks(ExitBlocks);
- SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
- ExitBlocks.end());
-#endif
+ // Iterate over users to be ready for actual sinking. Replace uses in
+ // unreachable blocks with undef and make all user PHIs trivially replacable.
+ SmallPtrSet<Instruction *, 8> VisitedUsers;
+ for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
+ auto *User = cast<Instruction>(*UI);
+ Use &U = UI.getUse();
+ ++UI;
- // Clones of this instruction. Don't create more than one per exit block!
- SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
+ if (VisitedUsers.count(User))
+ continue;
- // If this instruction is only used outside of the loop, then all users are
- // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
- // the instruction.
- while (!I.use_empty()) {
- Value::user_iterator UI = I.user_begin();
- auto *User = cast<Instruction>(*UI);
if (!DT->isReachableFromEntry(User->getParent())) {
User->replaceUsesOfWith(&I, UndefValue::get(I.getType()));
continue;
}
+
// The user must be a PHI node.
PHINode *PN = cast<PHINode>(User);
// Surprisingly, instructions can be used outside of loops without any
// exits. This can only happen in PHI nodes if the incoming block is
// unreachable.
- Use &U = UI.getUse();
BasicBlock *BB = PN->getIncomingBlock(U);
if (!DT->isReachableFromEntry(BB)) {
U = UndefValue::get(I.getType());
continue;
}
- BasicBlock *ExitBlock = PN->getParent();
- assert(ExitBlockSet.count(ExitBlock) &&
- "The LCSSA PHI is not in an exit block!");
+ VisitedUsers.insert(PN);
+ if (isTriviallyReplacablePHI(*PN, I))
+ continue;
- Instruction *New;
- auto It = SunkCopies.find(ExitBlock);
- if (It != SunkCopies.end())
- New = It->second;
- else
- New = SunkCopies[ExitBlock] =
- CloneInstructionInExitBlock(I, *ExitBlock, *PN, LI, SafetyInfo);
+ if (!canSplitPredecessors(PN))
+ return false;
+
+ // Split predecessors of the PHI so that we can make users trivially
+ // replacable.
+ splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop);
+ // Should rebuild the iterators, as they may be invalidated by
+ // splitPredecessorsOfLoopExit().
+ UI = I.user_begin();
+ UE = I.user_end();
+ }
+
+#ifndef NDEBUG
+ SmallVector<BasicBlock *, 32> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+ SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
+ ExitBlocks.end());
+#endif
+
+ // Clones of this instruction. Don't create more than one per exit block!
+ SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
+
+ // If this instruction is only used outside of the loop, then all users are
+ // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
+ // the instruction.
+ while (!I.use_empty()) {
+ Value::user_iterator UI = I.user_begin();
+ PHINode *PN = cast<PHINode>(*UI);
+ assert(ExitBlockSet.count(PN->getParent()) &&
+ "The LCSSA PHI is not in an exit block!");
+ // The PHI must be trivially replacable.
+ Instruction *New = sinkThroughTriviallyReplacablePHI(PN, &I, LI, SunkCopies,
+ SafetyInfo, CurLoop);
PN->replaceAllUsesWith(New);
PN->eraseFromParent();
}
-
- CurAST->deleteValue(&I);
- I.eraseFromParent();
return Changed;
}
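
The sinkThroughTriviallyReplacablePHI helper above memoizes one clone per exit block in SunkCopies, so a value used by several LCSSA PHIs of the same block is materialized only once. A small sketch of that find-or-create cache with hypothetical stand-in types:

    #include <iostream>
    #include <map>
    #include <string>

    struct Clone { std::string InBlock; };

    // One clone per exit block, created on first request and reused after.
    Clone &getOrCreateClone(std::map<std::string, Clone> &SunkCopies,
                            const std::string &ExitBlock) {
      auto It = SunkCopies.find(ExitBlock);
      if (It != SunkCopies.end())
        return It->second;
      std::cout << "cloning into " << ExitBlock << "\n"; // happens once
      return SunkCopies[ExitBlock] = Clone{ExitBlock};
    }

    int main() {
      std::map<std::string, Clone> SunkCopies;
      getOrCreateClone(SunkCopies, "exit1");
      getOrCreateClone(SunkCopies, "exit1"); // cache hit, no second clone
      getOrCreateClone(SunkCopies, "exit2");
    }
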
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 413fb75d1725..eb5f3cc47cef 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1326,9 +1326,9 @@ static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
// step 2: detect instructions corresponding to "x.next = x >> 1"
if (!DefX || DefX->getOpcode() != Instruction::AShr)
return false;
- if (ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1)))
- if (!Shft || !Shft->isOne())
- return false;
+ ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
+ if (!Shft || !Shft->isOne())
+ return false;
VarX = DefX->getOperand(0);
// step 3: Check the recurrence of variable X
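
This hunk fixes a latent bug: with the dyn_cast declared inside the if, a failed cast made the outer condition false and skipped the guard entirely instead of rejecting the loop. The same trap reproduced with plain pointers (names are illustrative):

    #include <cstdio>

    struct Shift { int Amount; };

    void buggy(Shift *S) {
      // When S is null the outer condition is false, so the inner check
      // never runs and we fall through without rejecting.
      if (Shift *Shft = S)
        if (!Shft || Shft->Amount != 1) {
          std::puts("rejected");
          return;
        }
      std::puts("accepted"); // reached for S == nullptr too -- wrong
    }

    void fixed(Shift *S) {
      Shift *Shft = S; // hoist the cast, then test both failure modes
      if (!Shft || Shft->Amount != 1) {
        std::puts("rejected");
        return;
      }
      std::puts("accepted");
    }

    int main() {
      buggy(nullptr); // prints "accepted" -- the bug
      fixed(nullptr); // prints "rejected"
    }
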
diff --git a/lib/Transforms/Scalar/LoopPredication.cpp b/lib/Transforms/Scalar/LoopPredication.cpp
index 9a623be234fe..52dea3254e79 100644
--- a/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/lib/Transforms/Scalar/LoopPredication.cpp
@@ -174,6 +174,9 @@
using namespace llvm;
+static cl::opt<bool> EnableIVTruncation("loop-predication-enable-iv-truncation",
+ cl::Hidden, cl::init(true));
+
namespace {
class LoopPredication {
/// Represents an induction variable check:
@@ -186,6 +189,10 @@ class LoopPredication {
const SCEV *Limit)
: Pred(Pred), IV(IV), Limit(Limit) {}
LoopICmp() {}
+ void dump() {
+ dbgs() << "LoopICmp Pred = " << Pred << ", IV = " << *IV
+ << ", Limit = " << *Limit << "\n";
+ }
};
ScalarEvolution *SE;
@@ -195,6 +202,7 @@ class LoopPredication {
BasicBlock *Preheader;
LoopICmp LatchCheck;
+ bool isSupportedStep(const SCEV* Step);
Optional<LoopICmp> parseLoopICmp(ICmpInst *ICI) {
return parseLoopICmp(ICI->getPredicate(), ICI->getOperand(0),
ICI->getOperand(1));
@@ -204,14 +212,36 @@ class LoopPredication {
Optional<LoopICmp> parseLoopLatchICmp();
+ bool CanExpand(const SCEV* S);
Value *expandCheck(SCEVExpander &Expander, IRBuilder<> &Builder,
ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
Instruction *InsertAt);
Optional<Value *> widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
IRBuilder<> &Builder);
+ Optional<Value *> widenICmpRangeCheckIncrementingLoop(LoopICmp LatchCheck,
+ LoopICmp RangeCheck,
+ SCEVExpander &Expander,
+ IRBuilder<> &Builder);
+
bool widenGuardConditions(IntrinsicInst *II, SCEVExpander &Expander);
+ // When the IV type is wider than the range operand type, we can still do loop
+ // predication, by generating SCEVs for the range and latch that are of the
+ // same type. We achieve this by generating a SCEV truncate expression for the
+ // latch IV. This is done iff truncation of the IV is a safe operation,
+ // without loss of information.
+ // Another way to achieve this is by generating a wider type SCEV for the
+ // range check operand, however, this needs a more involved check that
+ // operands do not overflow. This can lead to loss of information when the
+ // range operand is of the form: add i32 %offset, %iv. We need to prove that
+ // sext(x + y) is same as sext(x) + sext(y).
+ // This function returns true if we can safely represent the IV type in
+ // the RangeCheckType without loss of information.
+ bool isSafeToTruncateWideIVType(Type *RangeCheckType);
+ // Return the loopLatchCheck corresponding to the RangeCheckType if safe to do
+ // so.
+ Optional<LoopICmp> generateLoopLatchCheck(Type *RangeCheckType);
public:
LoopPredication(ScalarEvolution *SE) : SE(SE){};
bool runOnLoop(Loop *L);
@@ -301,53 +331,54 @@ Value *LoopPredication::expandCheck(SCEVExpander &Expander,
return Builder.CreateICmp(Pred, LHSV, RHSV);
}
-/// If ICI can be widened to a loop invariant condition emits the loop
-/// invariant condition in the loop preheader and return it, otherwise
-/// returns None.
-Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
- SCEVExpander &Expander,
- IRBuilder<> &Builder) {
- DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
- DEBUG(ICI->dump());
+Optional<LoopPredication::LoopICmp>
+LoopPredication::generateLoopLatchCheck(Type *RangeCheckType) {
- // parseLoopStructure guarantees that the latch condition is:
- // ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=.
- // We are looking for the range checks of the form:
- // i u< guardLimit
- auto RangeCheck = parseLoopICmp(ICI);
- if (!RangeCheck) {
- DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
+ auto *LatchType = LatchCheck.IV->getType();
+ if (RangeCheckType == LatchType)
+ return LatchCheck;
+ // For now, bail out if latch type is narrower than range type.
+ if (DL->getTypeSizeInBits(LatchType) < DL->getTypeSizeInBits(RangeCheckType))
return None;
- }
- if (RangeCheck->Pred != ICmpInst::ICMP_ULT) {
- DEBUG(dbgs() << "Unsupported range check predicate(" << RangeCheck->Pred
- << ")!\n");
+ if (!isSafeToTruncateWideIVType(RangeCheckType))
return None;
- }
- auto *RangeCheckIV = RangeCheck->IV;
- auto *Ty = RangeCheckIV->getType();
- if (Ty != LatchCheck.IV->getType()) {
- DEBUG(dbgs() << "Type mismatch between range check and latch IVs!\n");
+ // We can now safely identify the truncated version of the IV and limit for
+ // RangeCheckType.
+ LoopICmp NewLatchCheck;
+ NewLatchCheck.Pred = LatchCheck.Pred;
+ NewLatchCheck.IV = dyn_cast<SCEVAddRecExpr>(
+ SE->getTruncateExpr(LatchCheck.IV, RangeCheckType));
+ if (!NewLatchCheck.IV)
return None;
- }
- if (!RangeCheckIV->isAffine()) {
- DEBUG(dbgs() << "Range check IV is not affine!\n");
- return None;
- }
- auto *Step = RangeCheckIV->getStepRecurrence(*SE);
- if (Step != LatchCheck.IV->getStepRecurrence(*SE)) {
- DEBUG(dbgs() << "Range check and latch have IVs different steps!\n");
- return None;
- }
- assert(Step->isOne() && "must be one");
+ NewLatchCheck.Limit = SE->getTruncateExpr(LatchCheck.Limit, RangeCheckType);
+ DEBUG(dbgs() << "IV of type: " << *LatchType
+ << "can be represented as range check type:" << *RangeCheckType
+ << "\n");
+ DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n");
+ DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n");
+ return NewLatchCheck;
+}
+
+bool LoopPredication::isSupportedStep(const SCEV* Step) {
+ return Step->isOne();
+}
- // Generate the widened condition:
+bool LoopPredication::CanExpand(const SCEV* S) {
+ return SE->isLoopInvariant(S, L) && isSafeToExpand(S, *SE);
+}
+
+Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
+ LoopPredication::LoopICmp LatchCheck, LoopPredication::LoopICmp RangeCheck,
+ SCEVExpander &Expander, IRBuilder<> &Builder) {
+ auto *Ty = RangeCheck.IV->getType();
+ // Generate the widened condition for the forward loop:
// guardStart u< guardLimit &&
// latchLimit <pred> guardLimit - 1 - guardStart + latchStart
// where <pred> depends on the latch condition predicate. See the file
// header comment for the reasoning.
- const SCEV *GuardStart = RangeCheckIV->getStart();
- const SCEV *GuardLimit = RangeCheck->Limit;
+ // guardLimit - guardStart + latchStart - 1
+ const SCEV *GuardStart = RangeCheck.IV->getStart();
+ const SCEV *GuardLimit = RangeCheck.Limit;
const SCEV *LatchStart = LatchCheck.IV->getStart();
const SCEV *LatchLimit = LatchCheck.Limit;
@@ -355,7 +386,11 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
const SCEV *RHS =
SE->getAddExpr(SE->getMinusSCEV(GuardLimit, GuardStart),
SE->getMinusSCEV(LatchStart, SE->getOne(Ty)));
-
+ if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) ||
+ !CanExpand(LatchLimit) || !CanExpand(RHS)) {
+ DEBUG(dbgs() << "Can't expand limit check!\n");
+ return None;
+ }
ICmpInst::Predicate LimitCheckPred;
switch (LatchCheck.Pred) {
case ICmpInst::ICMP_ULT:
@@ -378,22 +413,68 @@ Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
DEBUG(dbgs() << "RHS: " << *RHS << "\n");
DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n");
- auto CanExpand = [this](const SCEV *S) {
- return SE->isLoopInvariant(S, L) && isSafeToExpand(S, *SE);
- };
- if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) ||
- !CanExpand(LatchLimit) || !CanExpand(RHS)) {
- DEBUG(dbgs() << "Can't expand limit check!\n");
- return None;
- }
-
Instruction *InsertAt = Preheader->getTerminator();
auto *LimitCheck =
expandCheck(Expander, Builder, LimitCheckPred, LatchLimit, RHS, InsertAt);
- auto *FirstIterationCheck = expandCheck(Expander, Builder, RangeCheck->Pred,
+ auto *FirstIterationCheck = expandCheck(Expander, Builder, RangeCheck.Pred,
GuardStart, GuardLimit, InsertAt);
return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
}
+/// If ICI can be widened to a loop invariant condition, emits the loop
+/// invariant condition in the loop preheader and returns it; otherwise
+/// returns None.
+Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
+ SCEVExpander &Expander,
+ IRBuilder<> &Builder) {
+ DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
+ DEBUG(ICI->dump());
+
+ // parseLoopStructure guarantees that the latch condition is:
+ // ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=.
+ // We are looking for the range checks of the form:
+ // i u< guardLimit
+ auto RangeCheck = parseLoopICmp(ICI);
+ if (!RangeCheck) {
+    DEBUG(dbgs() << "Failed to parse the range check condition!\n");
+ return None;
+ }
+ DEBUG(dbgs() << "Guard check:\n");
+ DEBUG(RangeCheck->dump());
+ if (RangeCheck->Pred != ICmpInst::ICMP_ULT) {
+ DEBUG(dbgs() << "Unsupported range check predicate(" << RangeCheck->Pred
+ << ")!\n");
+ return None;
+ }
+ auto *RangeCheckIV = RangeCheck->IV;
+ if (!RangeCheckIV->isAffine()) {
+ DEBUG(dbgs() << "Range check IV is not affine!\n");
+ return None;
+ }
+ auto *Step = RangeCheckIV->getStepRecurrence(*SE);
+ // We cannot just compare with latch IV step because the latch and range IVs
+ // may have different types.
+ if (!isSupportedStep(Step)) {
+    DEBUG(dbgs() << "Unsupported range check step(" << *Step << ")!\n");
+ return None;
+ }
+ auto *Ty = RangeCheckIV->getType();
+ auto CurrLatchCheckOpt = generateLoopLatchCheck(Ty);
+ if (!CurrLatchCheckOpt) {
+ DEBUG(dbgs() << "Failed to generate a loop latch check "
+ "corresponding to range type: "
+ << *Ty << "\n");
+ return None;
+ }
+
+ LoopICmp CurrLatchCheck = *CurrLatchCheckOpt;
+ // At this point the range check step and latch step should have the same
+ // value and type.
+ assert(Step == CurrLatchCheck.IV->getStepRecurrence(*SE) &&
+ "Range and latch should have same step recurrence!");
+
+ return widenICmpRangeCheckIncrementingLoop(CurrLatchCheck, *RangeCheck,
+ Expander, Builder);
+}
bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard,
SCEVExpander &Expander) {
@@ -485,15 +566,6 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() {
return None;
}
- if (Result->Pred != ICmpInst::ICMP_ULT &&
- Result->Pred != ICmpInst::ICMP_SLT &&
- Result->Pred != ICmpInst::ICMP_ULE &&
- Result->Pred != ICmpInst::ICMP_SLE) {
- DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
- << ")!\n");
- return None;
- }
-
// Check affine first, so if it's not we don't try to compute the step
// recurrence.
if (!Result->IV->isAffine()) {
@@ -502,14 +574,55 @@ Optional<LoopPredication::LoopICmp> LoopPredication::parseLoopLatchICmp() {
}
auto *Step = Result->IV->getStepRecurrence(*SE);
- if (!Step->isOne()) {
+ if (!isSupportedStep(Step)) {
DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
return None;
}
+ auto IsUnsupportedPredicate = [](const SCEV *Step, ICmpInst::Predicate Pred) {
+ assert(Step->isOne() && "expected Step to be one!");
+ return Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_SLT &&
+ Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_SLE;
+ };
+
+ if (IsUnsupportedPredicate(Step, Result->Pred)) {
+ DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
+ << ")!\n");
+ return None;
+ }
return Result;
}
+// Returns true if it's safe to truncate the IV to RangeCheckType.
+bool LoopPredication::isSafeToTruncateWideIVType(Type *RangeCheckType) {
+ if (!EnableIVTruncation)
+ return false;
+ assert(DL->getTypeSizeInBits(LatchCheck.IV->getType()) >
+ DL->getTypeSizeInBits(RangeCheckType) &&
+ "Expected latch check IV type to be larger than range check operand "
+ "type!");
+ // The start and end values of the IV should be known. This is to guarantee
+ // that truncating the wide type will not lose information.
+ auto *Limit = dyn_cast<SCEVConstant>(LatchCheck.Limit);
+ auto *Start = dyn_cast<SCEVConstant>(LatchCheck.IV->getStart());
+ if (!Limit || !Start)
+ return false;
+ // This check makes sure that the IV does not change sign during loop
+ // iterations. Consider latchType = i64, LatchStart = 5, Pred = ICMP_SGE,
+ // LatchEnd = 2, rangeCheckType = i32. If it's not a monotonic predicate, the
+ // IV wraps around, and the truncation of the IV would lose the range of
+ // iterations between 2^32 and 2^64.
+ bool Increasing;
+ if (!SE->isMonotonicPredicate(LatchCheck.IV, LatchCheck.Pred, Increasing))
+ return false;
+  // The number of active bits in Start and Limit should be strictly less
+  // than the bit width of RangeCheckType. This guarantees that truncating
+  // the latch check to RangeCheckType is a safe operation.
+ auto RangeCheckTypeBitSize = DL->getTypeSizeInBits(RangeCheckType);
+ return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize &&
+ Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize;
+}
+
bool LoopPredication::runOnLoop(Loop *Loop) {
L = Loop;
@@ -535,6 +648,9 @@ bool LoopPredication::runOnLoop(Loop *Loop) {
return false;
LatchCheck = *LatchCheckOpt;
+ DEBUG(dbgs() << "Latch check:\n");
+ DEBUG(LatchCheck.dump());
+
// Collect all the guards into a vector and process later, so as not
// to invalidate the instruction iterator.
SmallVector<IntrinsicInst *, 4> Guards;
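
For a concrete feel for the isSafeToTruncateWideIVType test above, here is a
minimal standalone sketch in plain C++ (activeBits mirrors what the pass gets
from APInt::getActiveBits; the concrete values are illustrative, not taken from
the patch): with LatchStart = 5 and LatchLimit = 1000, both constants fit well
inside 32 bits, so truncating the i64 latch IV to the i32 range-check type
loses no iterations.

    #include <cstdint>
    #include <cstdio>

    // Position of the highest set bit, i.e. how many low bits carry data.
    static unsigned activeBits(uint64_t V) {
      unsigned Bits = 0;
      for (; V; V >>= 1)
        ++Bits;
      return Bits;
    }

    int main() {
      // The pass only fires when both are SCEVConstants, so model constants.
      uint64_t LatchStart = 5, LatchLimit = 1000;
      unsigned RangeCheckBits = 32; // the range check IV is i32
      bool Safe = activeBits(LatchStart) < RangeCheckBits &&
                  activeBits(LatchLimit) < RangeCheckBits;
      printf("truncating the i64 latch IV to i32 is %s\n",
             Safe ? "safe" : "not safe");
      return 0;
    }
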
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index bbb179d3790c..7f03f2379e78 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1037,7 +1037,7 @@ struct LSRFixup {
Value *OperandValToReplace = nullptr;
/// If this user is to use the post-incremented value of an induction
- /// variable, this variable is non-null and holds the loop associated with the
+ /// variable, this set is non-empty and holds the loops associated with the
/// induction variable.
PostIncLoopSet PostIncLoops;
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index a44ca333fee6..1f32f9f24aac 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -145,8 +145,7 @@ XorOpnd::XorOpnd(Value *V) {
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
if (V->hasOneUse() && isa<Instruction>(V) &&
cast<Instruction>(V)->getOpcode() == Opcode &&
- (!isa<FPMathOperator>(V) ||
- cast<Instruction>(V)->hasUnsafeAlgebra()))
+ (!isa<FPMathOperator>(V) || cast<Instruction>(V)->isFast()))
return cast<BinaryOperator>(V);
return nullptr;
}
@@ -156,8 +155,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1,
if (V->hasOneUse() && isa<Instruction>(V) &&
(cast<Instruction>(V)->getOpcode() == Opcode1 ||
cast<Instruction>(V)->getOpcode() == Opcode2) &&
- (!isa<FPMathOperator>(V) ||
- cast<Instruction>(V)->hasUnsafeAlgebra()))
+ (!isa<FPMathOperator>(V) || cast<Instruction>(V)->isFast()))
return cast<BinaryOperator>(V);
return nullptr;
}
@@ -565,7 +563,7 @@ static bool LinearizeExprTree(BinaryOperator *I,
assert((!isa<Instruction>(Op) ||
cast<Instruction>(Op)->getOpcode() != Opcode
|| (isa<FPMathOperator>(Op) &&
- !cast<Instruction>(Op)->hasUnsafeAlgebra())) &&
+ !cast<Instruction>(Op)->isFast())) &&
"Should have been handled above!");
assert(Op->hasOneUse() && "Has uses outside the expression tree!");
@@ -2017,8 +2015,8 @@ void ReassociatePass::OptimizeInst(Instruction *I) {
if (I->isCommutative())
canonicalizeOperands(I);
- // Don't optimize floating point instructions that don't have unsafe algebra.
- if (I->getType()->isFPOrFPVectorTy() && !I->hasUnsafeAlgebra())
+ // Don't optimize floating-point instructions unless they are 'fast'.
+ if (I->getType()->isFPOrFPVectorTy() && !I->isFast())
return;
// Do not reassociate boolean (i1) expressions. We want to preserve the
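
The hasUnsafeAlgebra() to isFast() switch here (and in the hunks below) is
stricter than a pure rename: isFast() only answers true when every individual
fast-math flag is set. A minimal sketch, assuming the FastMathFlags API as of
this revision:

    #include "llvm/IR/Operator.h"
    #include <cassert>

    void fastMathFlagsDemo() {
      llvm::FastMathFlags FMF;
      FMF.setAllowReassoc(); // reassociation alone...
      assert(!FMF.isFast()); // ...does not count as 'fast'
      FMF.setFast();         // sets reassoc, nnan, ninf, nsz, arcp, contract, afn
      assert(FMF.isFast());
    }
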
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 1ca77cfec329..44acfc885797 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -125,10 +125,10 @@ struct RewriteStatepointsForGC : public ModulePass {
Changed |= runOnFunction(F);
if (Changed) {
- // stripNonValidAttributesAndMetadata asserts that shouldRewriteStatepointsIn
+ // stripNonValidData asserts that shouldRewriteStatepointsIn
// returns true for at least one function in the module. Since at least
// one function changed, we know that the precondition is satisfied.
- stripNonValidAttributesAndMetadata(M);
+ stripNonValidData(M);
}
return Changed;
@@ -146,15 +146,17 @@ struct RewriteStatepointsForGC : public ModulePass {
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
- /// heap. stripNonValidAttributesAndMetadata (conservatively) restores
+ /// heap. stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability. Similar reasoning also applies to the noalias
/// attributes and metadata. gc.statepoint can touch the entire heap including
/// noalias objects.
- void stripNonValidAttributesAndMetadata(Module &M);
+ /// Apart from attributes and metadata, we also remove instructions that imply
+ /// constant physical memory: llvm.invariant.start.
+ void stripNonValidData(Module &M);
- // Helpers for stripNonValidAttributesAndMetadata
- void stripNonValidAttributesAndMetadataFromBody(Function &F);
+ // Helpers for stripNonValidData
+ void stripNonValidDataFromBody(Function &F);
void stripNonValidAttributesFromPrototype(Function &F);
// Certain metadata on instructions are invalid after running RS4GC.
@@ -2385,14 +2387,30 @@ void RewriteStatepointsForGC::stripInvalidMetadataFromInstruction(Instruction &I
I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
}
-void RewriteStatepointsForGC::stripNonValidAttributesAndMetadataFromBody(Function &F) {
+void RewriteStatepointsForGC::stripNonValidDataFromBody(Function &F) {
if (F.empty())
return;
LLVMContext &Ctx = F.getContext();
MDBuilder Builder(Ctx);
+  // Set of invariant.start instructions that we need to remove.
+ // Use this to avoid invalidating the instruction iterator.
+ SmallVector<IntrinsicInst*, 12> InvariantStartInstructions;
+
for (Instruction &I : instructions(F)) {
+    // An invariant.start on a memory location implies that the referenced
+    // memory location is constant and unchanging. This is no longer true
+    // after RewriteStatepointsForGC runs because there can be calls to
+    // gc.statepoint, which free the entire heap, and the presence of
+    // invariant.start allows the optimizer to sink the load of a memory
+    // location past a statepoint, which is incorrect.
+ if (auto *II = dyn_cast<IntrinsicInst>(&I))
+ if (II->getIntrinsicID() == Intrinsic::invariant_start) {
+ InvariantStartInstructions.push_back(II);
+ continue;
+ }
+
if (const MDNode *MD = I.getMetadata(LLVMContext::MD_tbaa)) {
assert(MD->getNumOperands() < 5 && "unrecognized metadata shape!");
bool IsImmutableTBAA =
@@ -2422,6 +2440,12 @@ void RewriteStatepointsForGC::stripNonValidAttributesAndMetadataFromBody(Functio
RemoveNonValidAttrAtIndex(Ctx, CS, AttributeList::ReturnIndex);
}
}
+
+ // Delete the invariant.start instructions and RAUW undef.
+ for (auto *II : InvariantStartInstructions) {
+ II->replaceAllUsesWith(UndefValue::get(II->getType()));
+ II->eraseFromParent();
+ }
}
/// Returns true if this function should be rewritten by this pass. The main
@@ -2438,7 +2462,7 @@ static bool shouldRewriteStatepointsIn(Function &F) {
return false;
}
-void RewriteStatepointsForGC::stripNonValidAttributesAndMetadata(Module &M) {
+void RewriteStatepointsForGC::stripNonValidData(Module &M) {
#ifndef NDEBUG
assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
#endif
@@ -2447,7 +2471,7 @@ void RewriteStatepointsForGC::stripNonValidAttributesAndMetadata(Module &M) {
stripNonValidAttributesFromPrototype(F);
for (Function &F : M)
- stripNonValidAttributesAndMetadataFromBody(F);
+ stripNonValidDataFromBody(F);
}
bool RewriteStatepointsForGC::runOnFunction(Function &F) {
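
The collect-then-erase shape of stripNonValidDataFromBody is the usual way to
delete instructions discovered during an instructions(F) walk, since erasing in
place would invalidate the iterator. A condensed, self-contained restatement of
that loop (the helper name is hypothetical):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstIterator.h"
    #include "llvm/IR/IntrinsicInst.h"

    static void removeInvariantStarts(llvm::Function &F) {
      llvm::SmallVector<llvm::IntrinsicInst *, 12> ToErase;
      // Phase 1: record the offending calls without touching the IR.
      for (llvm::Instruction &I : llvm::instructions(F))
        if (auto *II = llvm::dyn_cast<llvm::IntrinsicInst>(&I))
          if (II->getIntrinsicID() == llvm::Intrinsic::invariant_start)
            ToErase.push_back(II);
      // Phase 2: the walk is over, so mutation is now safe.
      for (llvm::IntrinsicInst *II : ToErase) {
        II->replaceAllUsesWith(llvm::UndefValue::get(II->getType()));
        II->eraseFromParent();
      }
    }
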
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index b968cb8c892b..6de6c8cce2c9 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -4133,8 +4133,10 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
"new fragment is outside of original fragment");
Start -= OrigFragment->OffsetInBits;
}
- FragmentExpr =
- DIExpression::createFragmentExpression(Expr, Start, Size);
+ if (auto E = DIExpression::createFragmentExpression(Expr, Start, Size))
+ FragmentExpr = *E;
+ else
+ continue;
}
// Remove any existing intrinsics describing the same alloca.
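
createFragmentExpression now returns an Optional rather than a bare pointer, so
every caller has to unwrap or skip. A minimal caller-side sketch (the helper
name is hypothetical; the failure case is presumed to be a fragment that cannot
be composed with the existing expression, matching the bail-outs in this patch):

    #include "llvm/ADT/Optional.h"
    #include "llvm/IR/DebugInfoMetadata.h"

    static llvm::DIExpression *fragmentOrNull(llvm::DIExpression *Expr,
                                              unsigned OffsetInBits,
                                              unsigned SizeInBits) {
      if (auto E = llvm::DIExpression::createFragmentExpression(
              Expr, OffsetInBits, SizeInBits))
        return *E;    // success: expression now carries DW_OP_LLVM_fragment
      return nullptr; // failure: caller drops debug info for this piece
    }
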
diff --git a/lib/Transforms/Scalar/Scalar.cpp b/lib/Transforms/Scalar/Scalar.cpp
index c1034ace2068..8a5ae1b87312 100644
--- a/lib/Transforms/Scalar/Scalar.cpp
+++ b/lib/Transforms/Scalar/Scalar.cpp
@@ -35,6 +35,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeADCELegacyPassPass(Registry);
initializeBDCELegacyPassPass(Registry);
initializeAlignmentFromAssumptionsPass(Registry);
+ initializeCallSiteSplittingLegacyPassPass(Registry);
initializeConstantHoistingLegacyPassPass(Registry);
initializeConstantPropagationPass(Registry);
initializeCorrelatedValuePropagationPass(Registry);
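
The initializer being registered here is generated by the INITIALIZE_PASS
family of macros in the pass's own .cpp file. A toy stand-in showing the shape
of that boilerplate (all names below are hypothetical, not quoted from the new
CallSiteSplitting.cpp):

    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct ToyLegacyPass : public FunctionPass {
      static char ID;
      ToyLegacyPass() : FunctionPass(ID) {}
      bool runOnFunction(Function &) override { return false; }
    };
    } // namespace

    char ToyLegacyPass::ID = 0;
    // Expands to, among other things, initializeToyLegacyPassPass(PassRegistry&),
    // which is what a registry hookup like the one above would then call.
    INITIALIZE_PASS(ToyLegacyPass, "toy-pass", "Toy pass", false, false)
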
diff --git a/lib/Transforms/Utils/FunctionImportUtils.cpp b/lib/Transforms/Utils/FunctionImportUtils.cpp
index fbb61ac1ae91..2e6fc4e8482e 100644
--- a/lib/Transforms/Utils/FunctionImportUtils.cpp
+++ b/lib/Transforms/Utils/FunctionImportUtils.cpp
@@ -203,6 +203,23 @@ FunctionImportGlobalProcessing::getLinkage(const GlobalValue *SGV,
}
void FunctionImportGlobalProcessing::processGlobalForThinLTO(GlobalValue &GV) {
+
+ // Check the summaries to see if the symbol gets resolved to a known local
+ // definition.
+ if (GV.hasName()) {
+ ValueInfo VI = ImportIndex.getValueInfo(GV.getGUID());
+ if (VI) {
+      // Need to check that all summaries are local in case of hash collisions.
+ bool IsLocal = VI.getSummaryList().size() &&
+ llvm::all_of(VI.getSummaryList(),
+ [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ return Summary->isDSOLocal();
+ });
+ if (IsLocal)
+ GV.setDSOLocal(true);
+ }
+ }
+
bool DoPromote = false;
if (GV.hasLocalLinkage() &&
((DoPromote = shouldPromoteLocalToGlobal(&GV)) || isPerformingImport())) {
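
The getSummaryList().size() guard above is load-bearing: all_of is vacuously
true on an empty range, so without it a GUID that resolved to no summaries at
all would be marked dso_local. A standalone illustration:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<bool> NoSummaries;
      // Vacuously true: an empty summary list "passes" all_of.
      assert(std::all_of(NoSummaries.begin(), NoSummaries.end(),
                         [](bool IsLocal) { return IsLocal; }));
      // Hence the combined pattern: non-empty AND all local.
      bool IsLocal = !NoSummaries.empty() &&
                     std::all_of(NoSummaries.begin(), NoSummaries.end(),
                                 [](bool B) { return B; });
      assert(!IsLocal);
      return 0;
    }
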
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index 8c643c93ec4d..89dbe4b8fdaf 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -1362,16 +1362,25 @@ void llvm::salvageDebugInfo(Instruction &I) {
SmallVector<DbgValueInst *, 1> DbgValues;
auto &M = *I.getModule();
- auto MDWrap = [&](Value *V) {
+ auto wrapMD = [&](Value *V) {
return MetadataAsValue::get(I.getContext(), ValueAsMetadata::get(V));
};
- if (isa<BitCastInst>(&I)) {
+ auto applyOffset = [&](DbgValueInst *DVI, uint64_t Offset) {
+ auto *DIExpr = DVI->getExpression();
+ DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
+ DIExpression::WithStackValue);
+ DVI->setOperand(0, wrapMD(I.getOperand(0)));
+ DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
+ DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
+ };
+
+ if (isa<BitCastInst>(&I) || isa<IntToPtrInst>(&I)) {
findDbgValues(DbgValues, &I);
for (auto *DVI : DbgValues) {
       // These casts are entirely irrelevant for debug info. Rewrite the
       // dbg.value to use the cast's source.
- DVI->setOperand(0, MDWrap(I.getOperand(0)));
+ DVI->setOperand(0, wrapMD(I.getOperand(0)));
DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
}
} else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
@@ -1383,24 +1392,26 @@ void llvm::salvageDebugInfo(Instruction &I) {
// Rewrite a constant GEP into a DIExpression. Since we are performing
// arithmetic to compute the variable's *value* in the DIExpression, we
// need to mark the expression with a DW_OP_stack_value.
- if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) {
- auto *DIExpr = DVI->getExpression();
+ if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
// GEP offsets are i32 and thus always fit into an int64_t.
- DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref,
- Offset.getSExtValue(),
- DIExpression::WithStackValue);
- DVI->setOperand(0, MDWrap(I.getOperand(0)));
- DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
- DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
- }
+ applyOffset(DVI, Offset.getSExtValue());
}
+ } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
+ if (BI->getOpcode() == Instruction::Add)
+ if (auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)))
+ if (ConstInt->getBitWidth() <= 64) {
+ APInt Offset = ConstInt->getValue();
+ findDbgValues(DbgValues, &I);
+ for (auto *DVI : DbgValues)
+ applyOffset(DVI, Offset.getSExtValue());
+ }
} else if (isa<LoadInst>(&I)) {
findDbgValues(DbgValues, &I);
for (auto *DVI : DbgValues) {
// Rewrite the load into DW_OP_deref.
auto *DIExpr = DVI->getExpression();
DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
- DVI->setOperand(0, MDWrap(I.getOperand(0)));
+ DVI->setOperand(0, wrapMD(I.getOperand(0)));
DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
}
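
To make the new BinaryOperator case concrete: when an instruction like
%tmp = add i64 %x, 42 is erased, a dbg.value describing %tmp can instead
describe %x with the constant folded into the location expression. A sketch
equivalent to the applyOffset lambda above (the helper name is hypothetical):

    #include "llvm/IR/DebugInfoMetadata.h"
    #include "llvm/IR/IntrinsicInst.h"
    #include "llvm/IR/Metadata.h"

    // Rewrite DVI to describe Src plus Offset, e.g. as
    // (DW_OP_plus_uconst 42, DW_OP_stack_value), a computed value.
    static void salvageAddOfConstant(llvm::DbgValueInst *DVI, llvm::Value *Src,
                                     uint64_t Offset) {
      llvm::DIExpression *Expr = DVI->getExpression();
      Expr = llvm::DIExpression::prepend(Expr, llvm::DIExpression::NoDeref,
                                         Offset,
                                         llvm::DIExpression::WithStackValue);
      llvm::LLVMContext &Ctx = DVI->getContext();
      DVI->setOperand(
          0, llvm::MetadataAsValue::get(Ctx, llvm::ValueAsMetadata::get(Src)));
      DVI->setOperand(2, llvm::MetadataAsValue::get(Ctx, Expr));
    }
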
diff --git a/lib/Transforms/Utils/LoopUtils.cpp b/lib/Transforms/Utils/LoopUtils.cpp
index 13c0bfbcb2e9..0de6924e6354 100644
--- a/lib/Transforms/Utils/LoopUtils.cpp
+++ b/lib/Transforms/Utils/LoopUtils.cpp
@@ -432,7 +432,7 @@ RecurrenceDescriptor::isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
InstDesc &Prev, bool HasFunNoNaNAttr) {
bool FP = I->getType()->isFloatingPointTy();
Instruction *UAI = Prev.getUnsafeAlgebraInst();
- if (!UAI && FP && !I->hasUnsafeAlgebra())
+ if (!UAI && FP && !I->isFast())
UAI = I; // Found an unsafe (unvectorizable) algebra instruction.
switch (I->getOpcode()) {
@@ -660,11 +660,11 @@ Value *RecurrenceDescriptor::createMinMaxOp(IRBuilder<> &Builder,
break;
}
- // We only match FP sequences with unsafe algebra, so we can unconditionally
+ // We only match FP sequences that are 'fast', so we can unconditionally
// set it on any generated instructions.
IRBuilder<>::FastMathFlagGuard FMFG(Builder);
FastMathFlags FMF;
- FMF.setUnsafeAlgebra();
+ FMF.setFast();
Builder.setFastMathFlags(FMF);
Value *Cmp;
@@ -768,7 +768,7 @@ Value *InductionDescriptor::transform(IRBuilder<> &B, Value *Index,
// Floating point operations had to be 'fast' to enable the induction.
FastMathFlags Flags;
- Flags.setUnsafeAlgebra();
+ Flags.setFast();
Value *MulExp = B.CreateFMul(StepValue, Index);
if (isa<Instruction>(MulExp))
@@ -1338,7 +1338,7 @@ Optional<unsigned> llvm::getLoopEstimatedTripCount(Loop *L) {
static Value *addFastMathFlag(Value *V) {
if (isa<FPMathOperator>(V)) {
FastMathFlags Flags;
- Flags.setUnsafeAlgebra();
+ Flags.setFast();
cast<Instruction>(V)->setFastMathFlags(Flags);
}
return V;
@@ -1401,7 +1401,7 @@ Value *llvm::createSimpleTargetReduction(
RD::MinMaxRecurrenceKind MinMaxKind = RD::MRK_Invalid;
// TODO: Support creating ordered reductions.
FastMathFlags FMFUnsafe;
- FMFUnsafe.setUnsafeAlgebra();
+ FMFUnsafe.setFast();
switch (Opcode) {
case Instruction::Add:
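
Several of these hunks set flags under an IRBuilder FastMathFlagGuard. The
guard is an RAII helper: it snapshots the builder's current fast-math flags and
restores them at end of scope, so the full 'fast' set only stamps instructions
created inside. Minimal usage sketch:

    #include "llvm/IR/IRBuilder.h"

    void guardedFastRegion(llvm::IRBuilder<> &Builder, llvm::Value *L,
                           llvm::Value *R) {
      llvm::IRBuilder<>::FastMathFlagGuard Guard(Builder);
      llvm::FastMathFlags FMF;
      FMF.setFast();
      Builder.setFastMathFlags(FMF);
      Builder.CreateFMul(L, R); // created with the full 'fast' flag set
    } // Guard's destructor restores the previous fast-math flags here.
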
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 3c4dae92ebf3..e0045e9f48a4 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -2901,7 +2901,9 @@ static bool mergeConditionalStoreToAddress(BasicBlock *PTB, BasicBlock *PFB,
else
return false;
}
- return N <= PHINodeFoldingThreshold;
+ // The store we want to merge is counted in N, so add 1 to make sure
+ // we're counting the instructions that would be left.
+ return N <= (PHINodeFoldingThreshold + 1);
};
if (!MergeCondStoresAggressively &&
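
A worked instance of the off-by-one being fixed above (taking the pass's
default phi-node-folding-threshold of 2 as an assumption about this revision):
a side block holding two other instructions plus the mergeable store has N = 3,
and the old comparison rejected it even though only two instructions survive
the merge.

    #include <cassert>

    int main() {
      unsigned PHINodeFoldingThreshold = 2; // assumed cl::opt default
      unsigned N = 3; // two surviving instructions + the store being merged
      assert(!(N <= PHINodeFoldingThreshold));  // old check: rejected
      assert(N <= PHINodeFoldingThreshold + 1); // new check: accepted
      return 0;
    }
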
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 33117659489c..a29b83717f35 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1111,7 +1111,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
// Example: x = 1000, y = 0.001.
// pow(exp(x), y) = pow(inf, 0.001) = inf, whereas exp(x*y) = exp(1).
auto *OpC = dyn_cast<CallInst>(Op1);
- if (OpC && OpC->hasUnsafeAlgebra() && CI->hasUnsafeAlgebra()) {
+ if (OpC && OpC->isFast() && CI->isFast()) {
LibFunc Func;
Function *OpCCallee = OpC->getCalledFunction();
if (OpCCallee && TLI->getLibFunc(OpCCallee->getName(), Func) &&
@@ -1136,7 +1136,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
LibFunc_sqrtl)) {
// If -ffast-math:
// pow(x, -0.5) -> 1.0 / sqrt(x)
- if (CI->hasUnsafeAlgebra()) {
+ if (CI->isFast()) {
IRBuilder<>::FastMathFlagGuard Guard(B);
B.setFastMathFlags(CI->getFastMathFlags());
@@ -1157,7 +1157,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
LibFunc_sqrtl)) {
// In -ffast-math, pow(x, 0.5) -> sqrt(x).
- if (CI->hasUnsafeAlgebra()) {
+ if (CI->isFast()) {
IRBuilder<>::FastMathFlagGuard Guard(B);
B.setFastMathFlags(CI->getFastMathFlags());
@@ -1196,7 +1196,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0), Op1, "powrecip");
// In -ffast-math, generate repeated fmul instead of generating pow(x, n).
- if (CI->hasUnsafeAlgebra()) {
+ if (CI->isFast()) {
APFloat V = abs(Op2C->getValueAPF());
// We limit to a max of 7 fmul(s). Thus max exponent is 32.
// This transformation applies to integer exponents only.
@@ -1284,9 +1284,9 @@ Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
IRBuilder<>::FastMathFlagGuard Guard(B);
FastMathFlags FMF;
- if (CI->hasUnsafeAlgebra()) {
- // Unsafe algebra sets all fast-math-flags to true.
- FMF.setUnsafeAlgebra();
+ if (CI->isFast()) {
+ // If the call is 'fast', then anything we create here will also be 'fast'.
+ FMF.setFast();
} else {
// At a minimum, no-nans-fp-math must be true.
if (!CI->hasNoNaNs())
@@ -1317,13 +1317,13 @@ Value *LibCallSimplifier::optimizeLog(CallInst *CI, IRBuilder<> &B) {
if (UnsafeFPShrink && hasFloatVersion(Name))
Ret = optimizeUnaryDoubleFP(CI, B, true);
- if (!CI->hasUnsafeAlgebra())
+ if (!CI->isFast())
return Ret;
Value *Op1 = CI->getArgOperand(0);
auto *OpC = dyn_cast<CallInst>(Op1);
- // The earlier call must also be unsafe in order to do these transforms.
- if (!OpC || !OpC->hasUnsafeAlgebra())
+ // The earlier call must also be 'fast' in order to do these transforms.
+ if (!OpC || !OpC->isFast())
return Ret;
// log(pow(x,y)) -> y*log(x)
@@ -1333,7 +1333,7 @@ Value *LibCallSimplifier::optimizeLog(CallInst *CI, IRBuilder<> &B) {
IRBuilder<>::FastMathFlagGuard Guard(B);
FastMathFlags FMF;
- FMF.setUnsafeAlgebra();
+ FMF.setFast();
B.setFastMathFlags(FMF);
LibFunc Func;
@@ -1365,11 +1365,11 @@ Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilder<> &B) {
Callee->getIntrinsicID() == Intrinsic::sqrt))
Ret = optimizeUnaryDoubleFP(CI, B, true);
- if (!CI->hasUnsafeAlgebra())
+ if (!CI->isFast())
return Ret;
Instruction *I = dyn_cast<Instruction>(CI->getArgOperand(0));
- if (!I || I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
+ if (!I || I->getOpcode() != Instruction::FMul || !I->isFast())
return Ret;
// We're looking for a repeated factor in a multiplication tree,
@@ -1391,8 +1391,7 @@ Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilder<> &B) {
Value *OtherMul0, *OtherMul1;
if (match(Op0, m_FMul(m_Value(OtherMul0), m_Value(OtherMul1)))) {
// Pattern: sqrt((x * y) * z)
- if (OtherMul0 == OtherMul1 &&
- cast<Instruction>(Op0)->hasUnsafeAlgebra()) {
+ if (OtherMul0 == OtherMul1 && cast<Instruction>(Op0)->isFast()) {
// Matched: sqrt((x * x) * z)
RepeatOp = OtherMul0;
OtherOp = Op1;
@@ -1437,8 +1436,8 @@ Value *LibCallSimplifier::optimizeTan(CallInst *CI, IRBuilder<> &B) {
if (!OpC)
return Ret;
- // Both calls must allow unsafe optimizations in order to remove them.
- if (!CI->hasUnsafeAlgebra() || !OpC->hasUnsafeAlgebra())
+ // Both calls must be 'fast' in order to remove them.
+ if (!CI->isFast() || !OpC->isFast())
return Ret;
// tan(atan(x)) -> x
@@ -2167,10 +2166,10 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
// Command-line parameter overrides instruction attribute.
// This can't be moved to optimizeFloatingPointLibCall() because it may be
- // used by the intrinsic optimizations.
+ // used by the intrinsic optimizations.
if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
UnsafeFPShrink = EnableUnsafeFPShrink;
- else if (isa<FPMathOperator>(CI) && CI->hasUnsafeAlgebra())
+ else if (isa<FPMathOperator>(CI) && CI->isFast())
UnsafeFPShrink = true;
// First, check for intrinsics.
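
The exp/pow guard near the top of this file's hunks is easy to reproduce. This
standalone program evaluates the comment's own example (x = 1000, y = 0.001):
exp(x) overflows a double to infinity, so the "equivalent" rewrite would change
the result unless both calls are marked 'fast'.

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1000.0, y = 0.001;
      // exp(1000) overflows to inf, and pow(inf, 0.001) stays inf...
      printf("pow(exp(x), y) = %g\n", std::pow(std::exp(x), y));
      // ...while exp(x * y) = exp(1) is an ordinary finite value.
      printf("exp(x * y)     = %g\n", std::exp(x * y));
      return 0;
    }
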
diff --git a/lib/Transforms/Utils/SplitModule.cpp b/lib/Transforms/Utils/SplitModule.cpp
index 07157069518a..934a1bd73c24 100644
--- a/lib/Transforms/Utils/SplitModule.cpp
+++ b/lib/Transforms/Utils/SplitModule.cpp
@@ -141,15 +141,15 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
}
if (GV.hasLocalLinkage())
- addAllGlobalValueUsers(GVtoClusterMap, &GV, &GV);
- };
-
- std::for_each(M->begin(), M->end(), recordGVSet);
- std::for_each(M->global_begin(), M->global_end(), recordGVSet);
- std::for_each(M->alias_begin(), M->alias_end(), recordGVSet);
-
- // Assigned all GVs to merged clusters while balancing number of objects in
- // each.
+ addAllGlobalValueUsers(GVtoClusterMap, &GV, &GV);
+ };
+
+ llvm::for_each(M->functions(), recordGVSet);
+ llvm::for_each(M->globals(), recordGVSet);
+ llvm::for_each(M->aliases(), recordGVSet);
+
+  // Assign all GVs to merged clusters while balancing the number of objects
+  // in each.
auto CompareClusters = [](const std::pair<unsigned, unsigned> &a,
const std::pair<unsigned, unsigned> &b) {
if (a.second || b.second)
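
llvm::for_each is just the range-taking wrapper over std::for_each, which is
what lets the three begin()/end() pairs collapse into M->functions(),
M->globals(), and M->aliases(). Sketch of the equivalence:

    #include "llvm/ADT/STLExtras.h"
    #include <cassert>
    #include <vector>

    void rangeForEachDemo() {
      std::vector<int> V{1, 2, 3};
      int Sum = 0;
      // llvm::for_each(Range, F) == std::for_each(Range.begin(), Range.end(), F).
      llvm::for_each(V, [&Sum](int X) { Sum += X; });
      assert(Sum == 6);
    }
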
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index ca2f5a178e06..ed29ca0b5731 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -385,7 +385,7 @@ static unsigned getReciprocalPredBlockProb() { return 2; }
static Value *addFastMathFlag(Value *V) {
if (isa<FPMathOperator>(V)) {
FastMathFlags Flags;
- Flags.setUnsafeAlgebra();
+ Flags.setFast();
cast<Instruction>(V)->setFastMathFlags(Flags);
}
return V;
@@ -2720,7 +2720,7 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
// Floating point operations had to be 'fast' to enable the induction.
FastMathFlags Flags;
- Flags.setUnsafeAlgebra();
+ Flags.setFast();
Value *MulOp = Builder.CreateFMul(Cv, Step);
if (isa<Instruction>(MulOp))
@@ -5396,7 +5396,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// operations, shuffles, or casts, as they don't change precision or
// semantics.
} else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
- !I.hasUnsafeAlgebra()) {
+ !I.isFast()) {
DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
Hints->setPotentiallyUnsafe();
}
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 5dcf5528ac92..4232252af36d 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -4880,7 +4880,7 @@ class HorizontalReduction {
case RK_Min:
case RK_Max:
return Opcode == Instruction::ICmp ||
- cast<Instruction>(I->getOperand(0))->hasUnsafeAlgebra();
+ cast<Instruction>(I->getOperand(0))->isFast();
case RK_UMin:
case RK_UMax:
assert(Opcode == Instruction::ICmp &&
@@ -5232,7 +5232,7 @@ public:
Value *VectorizedTree = nullptr;
IRBuilder<> Builder(ReductionRoot);
FastMathFlags Unsafe;
- Unsafe.setUnsafeAlgebra();
+ Unsafe.setFast();
Builder.setFastMathFlags(Unsafe);
unsigned i = 0;