author      Rafael Espindola <rafael.espindola@gmail.com>  2014-02-18 15:33:12 +0000
committer   Rafael Espindola <rafael.espindola@gmail.com>  2014-02-18 15:33:12 +0000
commit      39d8dcb53b806841c8455637c23549b24760ce80 (patch)
tree        acdcc1ff7aaca513151e76a4375f973ee2f6ef5e /lib
parent      2678b21a88fa5c7f532be75710d6a80af9f83aee (diff)
download    llvm-39d8dcb53b806841c8455637c23549b24760ce80.tar.gz
Rename some member variables from TD to DL.
TargetData was renamed DataLayout back in r165242.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201581 91177308-0d34-0410-b5e6-96231b3b80d8
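
The rename is purely mechanical: every member that holds a `DataLayout*` takes a name matching the class. A minimal sketch of the before/after pattern, using a hypothetical `SomeAnalysis` class (not one of the passes touched below):

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// Hypothetical stand-in for the analyses edited in this patch.
class SomeAnalysis {
  const llvm::DataLayout *DL; // was: const llvm::DataLayout *TD;

public:
  SomeAnalysis() : DL(0) {} // was: TD(0)

  // The guarded-use idiom seen throughout the diff: the pointer may be
  // null when no DataLayout is available, so every use must tolerate that.
  uint64_t typeStoreSize(llvm::Type *Ty, uint64_t UnknownSize) const {
    return DL ? DL->getTypeStoreSize(Ty) : UnknownSize; // was: TD ? TD->...
  }
};
```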
Diffstat (limited to 'lib')
 lib/Analysis/AliasAnalysis.cpp                      |  8
 lib/Analysis/BasicAliasAnalysis.cpp                 | 42
 lib/Analysis/IPA/InlineCost.cpp                     |  6
 lib/Analysis/IVUsers.cpp                            |  6
 lib/Analysis/LazyValueInfo.cpp                      |  8
 lib/Analysis/MemoryDependenceAnalysis.cpp           | 22
 lib/Analysis/NoAliasAnalysis.cpp                    |  2
 lib/Analysis/PHITransAddr.cpp                       |  6
 lib/Analysis/ScalarEvolution.cpp                    | 56
 lib/Analysis/ScalarEvolutionExpander.cpp            | 20
 lib/CodeGen/IntrinsicLowering.cpp                   | 12
 lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp |  2
 lib/CodeGen/TargetLoweringBase.cpp                  | 10
 lib/ExecutionEngine/ExecutionEngine.cpp             | 10
 lib/Transforms/Utils/InlineFunction.cpp             | 18
15 files changed, 114 insertions, 114 deletions
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index a7b36dbcb0fb..01d2c76a60c3 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -416,9 +416,9 @@ AliasAnalysis::ModRefResult
 AliasAnalysis::callCapturesBefore(const Instruction *I,
                                   const AliasAnalysis::Location &MemLoc,
                                   DominatorTree *DT) {
-  if (!DT || !TD) return AliasAnalysis::ModRef;
+  if (!DT || !DL) return AliasAnalysis::ModRef;

-  const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+  const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
   if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
       isa<Constant>(Object))
     return AliasAnalysis::ModRef;
@@ -472,7 +472,7 @@ AliasAnalysis::~AliasAnalysis() {}
 /// AliasAnalysis interface before any other methods are called.
 ///
 void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
-  TD = P->getAnalysisIfAvailable<DataLayout>();
+  DL = P->getAnalysisIfAvailable<DataLayout>();
   TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
   AA = &P->getAnalysis<AliasAnalysis>();
 }
@@ -487,7 +487,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
 /// if known, or a conservative value otherwise.
 ///
 uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
-  return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
+  return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
 }

 /// canBasicBlockModify - Return true if it is possible for execution of the
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 5f7dd98e1938..9ed8f08af42d 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -593,7 +593,7 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
   SmallVector<const Value *, 16> Worklist;
   Worklist.push_back(Loc.Ptr);
   do {
-    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
+    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
     if (!Visited.insert(V)) {
       Visited.clear();
       return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -698,7 +698,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
   assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
          "AliasAnalysis query involving multiple functions!");

-  const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
+  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

   // If this is a tail call and Loc.Ptr points to a stack location, we know that
   // the tail call cannot access or modify the local stack.
@@ -805,7 +805,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
       // LLVM's vld1 and vst1 intrinsics currently only support a single
       // vector register.
       uint64_t Size =
-        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
+        DL ? DL->getTypeStoreSize(II->getType()) : UnknownSize;
       if (isNoAlias(Location(II->getArgOperand(0), Size,
                              II->getMetadata(LLVMContext::MD_tbaa)),
                     Loc))
@@ -814,7 +814,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
     }
     case Intrinsic::arm_neon_vst1: {
       uint64_t Size =
-        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
+        DL ? DL->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
       if (isNoAlias(Location(II->getArgOperand(0), Size,
                              II->getMetadata(LLVMContext::MD_tbaa)),
                     Loc))
@@ -877,7 +877,7 @@ static bool areVarIndicesEqual(SmallVectorImpl<VariableGEPIndex> &Indices1,
 /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
 /// against another pointer.  We know that V1 is a GEP, but we don't know
-/// anything about V2.  UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
+/// anything about V2.  UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
 /// UnderlyingV2 is the same for V2.
 ///
 AliasAnalysis::AliasResult
@@ -911,13 +911,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
     int64_t GEP2BaseOffset;
     SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
     const Value *GEP2BasePtr =
-      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);
     const Value *GEP1BasePtr =
-      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
-      assert(TD == 0 &&
+      assert(DL == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
       return MayAlias;
     }
@@ -937,17 +937,17 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
     // exactly, see if the computed offset from the common pointer tells us
     // about the relation of the resulting pointer.
     const Value *GEP1BasePtr =
-      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);

     int64_t GEP2BaseOffset;
     SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
     const Value *GEP2BasePtr =
-      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);

     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
-      assert(TD == 0 &&
+      assert(DL == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
       return MayAlias;
     }
@@ -977,12 +977,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
       return R;

     const Value *GEP1BasePtr =
-      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);

     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1) {
-      assert(TD == 0 &&
+      assert(DL == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
       return MayAlias;
     }
@@ -1215,8 +1215,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
     return NoAlias;  // Scalars cannot alias each other

   // Figure out what objects these things are pointing to if we can.
-  const Value *O1 = GetUnderlyingObject(V1, TD);
-  const Value *O2 = GetUnderlyingObject(V2, TD);
+  const Value *O1 = GetUnderlyingObject(V1, DL);
+  const Value *O2 = GetUnderlyingObject(V2, DL);

   // Null values in the default address space don't point to any object, so they
   // don't alias any other pointer.
@@ -1265,9 +1265,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,

   // If the size of one access is larger than the entire object on the other
   // side, then we know such behavior is undefined and can assume no alias.
-  if (TD)
-    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
-        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
+  if (DL)
+    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
       return NoAlias;

   // Check the cache before climbing up use-def chains. This also terminates
@@ -1319,9 +1319,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
   // If both pointers are pointing into the same object and one of them
   // accesses is accessing the entire object, then the accesses must
   // overlap in some way.
-  if (TD && O1 == O2)
-    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
-        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
+  if (DL && O1 == O2)
+    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
+        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
       return AliasCache[Locs] = PartialAlias;

   AliasResult Result =
diff --git a/lib/Analysis/IPA/InlineCost.cpp b/lib/Analysis/IPA/InlineCost.cpp
index 920f0de2fa21..b80e51749433 100644
--- a/lib/Analysis/IPA/InlineCost.cpp
+++ b/lib/Analysis/IPA/InlineCost.cpp
@@ -1203,7 +1203,7 @@ INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",

 char InlineCostAnalysis::ID = 0;

-InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}
+InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), DL(0) {}

 InlineCostAnalysis::~InlineCostAnalysis() {}

@@ -1214,7 +1214,7 @@ void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
 }

 bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TTI = &getAnalysis<TargetTransformInfo>();
   return false;
 }
@@ -1272,7 +1272,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
   DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
         << "...\n");

-  CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
+  CallAnalyzer CA(DL, *TTI, *Callee, Threshold);
   bool ShouldInline = CA.analyzeCall(CS);

   DEBUG(CA.dump());
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index eacd4eb029de..bf03e6ad2371 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -123,14 +123,14 @@ bool IVUsers::AddUsersImpl(Instruction *I,
   // IVUsers is used by LSR which assumes that all SCEV expressions are safe to
   // pass to SCEVExpander. Expressions are not safe to expand if they represent
   // operations that are not safe to speculate, namely integer division.
-  if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, TD))
+  if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
     return false;

   // LSR is not APInt clean, do not touch integers bigger than 64-bits.
   // Also avoid creating IVs of non-native types. For example, we don't want a
   // 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
   uint64_t Width = SE->getTypeSizeInBits(I->getType());
-  if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
+  if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
     return false;

   // Get the symbolic expression for this instruction.
@@ -234,7 +234,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
   LI = &getAnalysis<LoopInfo>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   SE = &getAnalysis<ScalarEvolution>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();

   // Find all uses of induction variables in this loop, and categorize
   // them by stride.  Start by finding all of the PHI nodes in the header for
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index b6970af4cdec..a07eef2c199e 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -1013,7 +1013,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
   if (PImpl)
     getCache(PImpl).clear();

-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();

   // Fully lazy.
@@ -1073,7 +1073,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
   // If we know the value is a constant, evaluate the conditional.
   Constant *Res = 0;
   if (Result.isConstant()) {
-    Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+    Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
                                           TLI);
     if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
       return ResCI->isZero() ? False : True;
@@ -1115,14 +1115,14 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
     if (Pred == ICmpInst::ICMP_EQ) {
       // !C1 == C -> false iff C1 == C.
       Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
-                                            Result.getNotConstant(), C, TD,
+                                            Result.getNotConstant(), C, DL,
                                             TLI);
       if (Res->isNullValue())
         return False;
     } else if (Pred == ICmpInst::ICMP_NE) {
       // !C1 != C -> true iff C1 == C.
       Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
-                                            Result.getNotConstant(), C, TD,
+                                            Result.getNotConstant(), C, DL,
                                             TLI);
       if (Res->isNullValue())
         return True;
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index dc07ac8c5d39..8811e22a68a9 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -87,7 +87,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
 bool MemoryDependenceAnalysis::runOnFunction(Function &) {
   AA = &getAnalysis<AliasAnalysis>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   DominatorTreeWrapperPass *DTWP =
       getAnalysisIfAvailable<DominatorTreeWrapperPass>();
   DT = DTWP ? &DTWP->getDomTree() : 0;
@@ -258,17 +258,17 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                        const Value *&MemLocBase,
                                        int64_t &MemLocOffs,
                                        const LoadInst *LI,
-                                       const DataLayout *TD) {
+                                       const DataLayout *DL) {
   // If we have no target data, we can't do this.
-  if (TD == 0) return false;
+  if (DL == 0) return false;

   // If we haven't already computed the base/offset of MemLoc, do so now.
   if (MemLocBase == 0)
-    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);
+    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

   unsigned Size = MemoryDependenceAnalysis::
     getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
-                                    LI, *TD);
+                                    LI, *DL);
   return Size != 0;
 }

@@ -282,7 +282,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
 unsigned MemoryDependenceAnalysis::
 getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                 unsigned MemLocSize, const LoadInst *LI,
-                                const DataLayout &TD) {
+                                const DataLayout &DL) {
   // We can only extend simple integer loads.
   if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

@@ -295,7 +295,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
   // Get the base of this load.
   int64_t LIOffs = 0;
   const Value *LIBase =
-    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);
+    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);

   // If the two pointers are not based on the same pointer, we can't tell that
   // they are related.
@@ -331,7 +331,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
     // If this load size is bigger than our known alignment or would not fit
     // into a native integer register, then we fail.
     if (NewLoadByteSize > LoadAlign ||
-        !TD.fitsInLegalInteger(NewLoadByteSize*8))
+        !DL.fitsInLegalInteger(NewLoadByteSize*8))
       return 0;

     if (LIOffs+NewLoadByteSize > MemLocEnd &&
@@ -424,7 +424,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
         if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
           if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
               isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
-                                                     MemLocOffset, LI, TD))
+                                                     MemLocOffset, LI, DL))
             return MemDepResult::getClobber(Inst);

         continue;
@@ -500,7 +500,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
     // need to continue scanning until the malloc call.
     const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
     if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
-      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
+      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

       if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
         return MemDepResult::getDef(Inst);
@@ -773,7 +773,7 @@ getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
          "Can't get pointer deps of a non-pointer!");
   Result.clear();

-  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
+  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);

   // This is the set of blocks we've inspected, and the pointer we consider in
   // each block.  Because of critical edges, we currently bail out if querying
diff --git a/lib/Analysis/NoAliasAnalysis.cpp b/lib/Analysis/NoAliasAnalysis.cpp
index 907e9621baed..5d6b2ea1ab82 100644
--- a/lib/Analysis/NoAliasAnalysis.cpp
+++ b/lib/Analysis/NoAliasAnalysis.cpp
@@ -36,7 +36,7 @@ namespace {
     virtual void initializePass() {
       // Note: NoAA does not call InitializeAliasAnalysis because it's
       // special and does not support chaining.
-      TD = getAnalysisIfAvailable<DataLayout>();
+      DL = getAnalysisIfAvailable<DataLayout>();
     }

     virtual AliasResult alias(const Location &LocA, const Location &LocB) {
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index bcff002fee6e..866f82a17a1c 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -229,7 +229,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
       return GEP;

     // Simplify the GEP to handle 'gep x, 0' -> x etc.
-    if (Value *V = SimplifyGEPInst(GEPOps, TD, TLI, DT)) {
+    if (Value *V = SimplifyGEPInst(GEPOps, DL, TLI, DT)) {
       for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
         RemoveInstInputs(GEPOps[i], InstInputs);

@@ -285,7 +285,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
     }

     // See if the add simplifies away.
-    if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
+    if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT)) {
       // If we simplified the operands, the LHS is no longer an input, but Res
       // is.
       RemoveInstInputs(LHS, InstInputs);
@@ -372,7 +372,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
                            SmallVectorImpl<Instruction*> &NewInsts) {
   // See if we have a version of this value already available and dominating
   // PredBB.  If so, there is no need to insert a new instance of it.
-  PHITransAddr Tmp(InVal, TD);
+  PHITransAddr Tmp(InVal, DL);
   if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT))
     return Tmp.getAddr();
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 03fee8832401..f2523af9ab01 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -2663,12 +2663,12 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
-  if (TD)
-    return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
+  if (DL)
+    return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));

   Constant *C = ConstantExpr::getSizeOf(AllocTy);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
       C = Folded;
   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
   assert(Ty == IntTy && "Effective SCEV type doesn't match");
@@ -2681,14 +2681,14 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
-  if (TD) {
+  if (DL) {
     return getConstant(IntTy,
-                       TD->getStructLayout(STy)->getElementOffset(FieldNo));
+                       DL->getStructLayout(STy)->getElementOffset(FieldNo));
   }

   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
       C = Folded;

   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
@@ -2736,8 +2736,8 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");

   // If we have a DataLayout, use it!
-  if (TD)
-    return TD->getTypeSizeInBits(Ty);
+  if (DL)
+    return DL->getTypeSizeInBits(Ty);

   // Integer types have fixed sizes.
   if (Ty->isIntegerTy())
@@ -2763,8 +2763,8 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");

-  if (TD)
-    return TD->getIntPtrType(Ty);
+  if (DL)
+    return DL->getIntPtrType(Ty);

   // Without DataLayout, conservatively assume pointers are 64-bit.
   return Type::getInt64Ty(getContext());
@@ -3232,7 +3232,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
   // PHI's incoming blocks are in a different loop, in which case doing so
   // risks breaking LCSSA form. Instcombine would normally zap these, but
   // it doesn't have DominatorTree information, so it may miss cases.
-  if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
+  if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
     if (LI->replacementPreservesLCSSAForm(PN, V))
       return getSCEV(V);

@@ -3503,7 +3503,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
     // For a SCEVUnknown, ask ValueTracking.
     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
-    ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
+    ComputeMaskedBits(U->getValue(), Zeros, Ones, DL);
     if (Ones == ~Zeros + 1)
       return setUnsignedRange(U, ConservativeResult);
     return setUnsignedRange(U,
@@ -3653,9 +3653,9 @@ ScalarEvolution::getSignedRange(const SCEV *S) {

   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
     // For a SCEVUnknown, ask ValueTracking.
-    if (!U->getValue()->getType()->isIntegerTy() && !TD)
+    if (!U->getValue()->getType()->isIntegerTy() && !DL)
       return setSignedRange(U, ConservativeResult);
-    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
+    unsigned NS = ComputeNumSignBits(U->getValue(), DL);
     if (NS <= 1)
       return setSignedRange(U, ConservativeResult);
     return setSignedRange(U, ConservativeResult.intersectWith(
@@ -3762,7 +3762,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
       unsigned TZ = A.countTrailingZeros();
       unsigned BitWidth = A.getBitWidth();
       APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-      ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
+      ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, DL);

       APInt EffectiveMask =
         APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -4956,7 +4956,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
 /// reason, return null.
 static Constant *EvaluateExpression(Value *V, const Loop *L,
                                     DenseMap<Instruction *, Constant *> &Vals,
-                                    const DataLayout *TD,
+                                    const DataLayout *DL,
                                     const TargetLibraryInfo *TLI) {
   // Convenient constant check, but redundant for recursive calls.
   if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -4983,7 +4983,7 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
       if (!Operands[i]) return 0;
       continue;
     }
-    Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
+    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
     Vals[Operand] = C;
     if (!C) return 0;
     Operands[i] = C;
@@ -4991,12 +4991,12 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,

   if (CmpInst *CI = dyn_cast<CmpInst>(I))
     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
-                                           Operands[1], TD, TLI);
+                                           Operands[1], DL, TLI);
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (!LI->isVolatile())
-      return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+      return ConstantFoldLoadFromConstPtr(Operands[0], DL);
   }
-  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
                                   TLI);
 }

@@ -5052,7 +5052,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
     // Compute the value of the PHIs for the next iteration.
     // EvaluateExpression adds non-phi values to the CurrentIterVals map.
     DenseMap<Instruction *, Constant *> NextIterVals;
-    Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+    Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
                                            TLI);
     if (NextPHI == 0)
       return 0;        // Couldn't evaluate!
@@ -5078,7 +5078,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
       Constant *&NextPHI = NextIterVals[PHI];
       if (!NextPHI) {   // Not already computed.
         Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
-        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
       }
       if (NextPHI != I->second)
         StoppedEvolving = false;
@@ -5134,7 +5134,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
   for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
     ConstantInt *CondVal =
       dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
-                                                       TD, TLI));
+                                                       DL, TLI));

     // Couldn't symbolically evaluate.
     if (!CondVal) return getCouldNotCompute();
@@ -5164,7 +5164,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
       if (NextPHI) continue;    // Already computed!

       Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
-      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
     }
     CurrentIterVals.swap(NextIterVals);
   }
@@ -5369,14 +5369,14 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
         Constant *C = 0;
         if (const CmpInst *CI = dyn_cast<CmpInst>(I))
           C = ConstantFoldCompareInstOperands(CI->getPredicate(),
-                                              Operands[0], Operands[1], TD,
+                                              Operands[0], Operands[1], DL,
                                               TLI);
         else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
           if (!LI->isVolatile())
-            C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+            C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
         } else
           C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
-                                       Operands, TD, TLI);
+                                       Operands, DL, TLI);
         if (!C) return V;
         return getSCEV(C);
       }
@@ -7385,7 +7385,7 @@ ScalarEvolution::ScalarEvolution()
 bool ScalarEvolution::runOnFunction(Function &F) {
   this->F = &F;
   LI = &getAnalysis<LoopInfo>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   return false;
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 8c5405896312..b778c6e3467a 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -210,7 +210,7 @@ static bool FactorOutConstant(const SCEV *&S,
                               const SCEV *&Remainder,
                               const SCEV *Factor,
                               ScalarEvolution &SE,
-                              const DataLayout *TD) {
+                              const DataLayout *DL) {
   // Everything is divisible by one.
   if (Factor->isOne())
     return true;
@@ -250,7 +250,7 @@ static bool FactorOutConstant(const SCEV *&S,
   // In a Mul, check if there is a constant operand which is a multiple
   // of the given factor.
   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
-    if (TD) {
+    if (DL) {
       // With DataLayout, the size is known.  Check if there is a constant
       // operand which is a multiple of the given factor. If so, we can
      // factor it.
@@ -270,7 +270,7 @@ static bool FactorOutConstant(const SCEV *&S,
       for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
         const SCEV *SOp = M->getOperand(i);
         const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
-        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
+        if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
             Remainder->isZero()) {
           SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
           NewMulOps[i] = SOp;
@@ -285,12 +285,12 @@ static bool FactorOutConstant(const SCEV *&S,
   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
     const SCEV *Step = A->getStepRecurrence(SE);
     const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
-    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
+    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
       return false;
     if (!StepRem->isZero())
       return false;
     const SCEV *Start = A->getStart();
-    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
+    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
       return false;
     S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                          A->getNoWrapFlags(SCEV::FlagNW));
@@ -404,8 +404,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // without the other.
   SplitAddRecs(Ops, Ty, SE);

-  Type *IntPtrTy = SE.TD
-                 ? SE.TD->getIntPtrType(PTy)
+  Type *IntPtrTy = SE.DL
+                 ? SE.DL->getIntPtrType(PTy)
                  : Type::getInt64Ty(PTy->getContext());

   // Descend down the pointer's type and attempt to convert the other
@@ -424,7 +424,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
       const SCEV *Op = Ops[i];
       const SCEV *Remainder = SE.getConstant(Ty, 0);
-      if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+      if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
         // Op now has ElSize factored out.
         ScaledOps.push_back(Op);
         if (!Remainder->isZero())
@@ -458,13 +458,13 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
       bool FoundFieldNo = false;
       // An empty struct has no fields.
       if (STy->getNumElements() == 0) break;
-      if (SE.TD) {
+      if (SE.DL) {
         // With DataLayout, field offsets are known. See if a constant offset
         // falls within any of the struct fields.
         if (Ops.empty()) break;
         if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
           if (SE.getTypeSizeInBits(C->getType()) <= 64) {
-            const StructLayout &SL = *SE.TD->getStructLayout(STy);
+            const StructLayout &SL = *SE.DL->getStructLayout(STy);
             uint64_t FullOffset = C->getValue()->getZExtValue();
             if (FullOffset < SL.getSizeInBytes()) {
               unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
diff --git a/lib/CodeGen/IntrinsicLowering.cpp b/lib/CodeGen/IntrinsicLowering.cpp
index c38d4fbc5aa4..cd3899f510b1 100644
--- a/lib/CodeGen/IntrinsicLowering.cpp
+++ b/lib/CodeGen/IntrinsicLowering.cpp
@@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
                               Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context),
-                              TD.getIntPtrType(Context), (Type *)0);
+                              DL.getIntPtrType(Context), (Type *)0);
         break;
       case Intrinsic::memmove:
         M.getOrInsertFunction("memmove",
                               Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context),
-                              TD.getIntPtrType(Context), (Type *)0);
+                              DL.getIntPtrType(Context), (Type *)0);
         break;
       case Intrinsic::memset:
         M.getOrInsertFunction("memset",
                               Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context),
                               Type::getInt32Ty(M.getContext()),
-                              TD.getIntPtrType(Context), (Type *)0);
+                              DL.getIntPtrType(Context), (Type *)0);
         break;
       case Intrinsic::sqrt:
         EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
@@ -463,7 +463,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break;   // Strip out annotate intrinsic

   case Intrinsic::memcpy: {
-    Type *IntPtr = TD.getIntPtrType(Context);
+    Type *IntPtr = DL.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
@@ -474,7 +474,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break;
   }
   case Intrinsic::memmove: {
-    Type *IntPtr = TD.getIntPtrType(Context);
+    Type *IntPtr = DL.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
@@ -486,7 +486,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
   }
   case Intrinsic::memset: {
     Value *Op0 = CI->getArgOperand(0);
-    Type *IntPtr = TD.getIntPtrType(Op0->getType());
+    Type *IntPtr = DL.getIntPtrType(Op0->getType());
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
diff --git a/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp b/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
index f769b44efbb3..1120be8ed2ab 100644
--- a/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
@@ -16,7 +16,7 @@ using namespace llvm;

 TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
-  : TD(TM.getDataLayout()) {
+  : DL(TM.getDataLayout()) {
 }

 TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 00b551b66f48..bdcb7216a56f 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -661,11 +661,11 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
 /// NOTE: The constructor takes ownership of TLOF.
 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
                                        const TargetLoweringObjectFile *tlof)
-  : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
+  : TM(tm), DL(TM.getDataLayout()), TLOF(*tlof) {
   initActions();

   // Perform these initializations only once.
-  IsLittleEndian = TD->isLittleEndian();
+  IsLittleEndian = DL->isLittleEndian();
   MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
   MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
     = MaxStoresPerMemmoveOptSize = 4;
@@ -802,7 +802,7 @@ MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
 }

 unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
-  return TD->getPointerSizeInBits(AS);
+  return DL->getPointerSizeInBits(AS);
 }

 unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
@@ -811,7 +811,7 @@ unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
 }

 MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
-  return MVT::getIntegerVT(8*TD->getPointerSize(0));
+  return MVT::getIntegerVT(8*DL->getPointerSize(0));
 }

 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
@@ -1286,7 +1286,7 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
 /// function arguments in the caller parameter area.  This is the actual
 /// alignment, not its logarithm.
 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
-  return TD->getABITypeAlignment(Ty);
+  return DL->getABITypeAlignment(Ty);
 }

 //===----------------------------------------------------------------------===//
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 88ec5b3e6a99..335efaf8a1bb 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -590,8 +590,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
     case Instruction::GetElementPtr: {
       // Compute the index
       GenericValue Result = getConstantValue(Op0);
-      APInt Offset(TD->getPointerSizeInBits(), 0);
-      cast<GEPOperator>(CE)->accumulateConstantOffset(*TD, Offset);
+      APInt Offset(DL->getPointerSizeInBits(), 0);
+      cast<GEPOperator>(CE)->accumulateConstantOffset(*DL, Offset);

       char* tmp = (char*) Result.PointerVal;
       Result = PTOGV(tmp + Offset.getSExtValue());
@@ -678,16 +678,16 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
     }
     case Instruction::PtrToInt: {
       GenericValue GV = getConstantValue(Op0);
-      uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType());
+      uint32_t PtrWidth = DL->getTypeSizeInBits(Op0->getType());
       assert(PtrWidth <= 64 && "Bad pointer width");
       GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
-      uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType());
+      uint32_t IntWidth = DL->getTypeSizeInBits(CE->getType());
       GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
       return GV;
     }
     case Instruction::IntToPtr: {
       GenericValue GV = getConstantValue(Op0);
-      uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType());
+      uint32_t PtrWidth = DL->getTypeSizeInBits(CE->getType());
       GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
       assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
       GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index e35a1d0b006d..d1d8870b7601 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -343,7 +343,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
     // If the pointer is already known to be sufficiently aligned, or if we can
     // round it up to a larger alignment, then we don't need a temporary.
     if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
-                                   IFI.TD) >= ByValAlignment)
+                                   IFI.DL) >= ByValAlignment)
       return Arg;

     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
@@ -356,8 +356,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,

   // Create the alloca.  If we have DataLayout, use nice alignment.
   unsigned Align = 1;
-  if (IFI.TD)
-    Align = IFI.TD->getPrefTypeAlignment(AggTy);
+  if (IFI.DL)
+    Align = IFI.DL->getPrefTypeAlignment(AggTy);

   // If the byval had an alignment specified, we *must* use at least that
   // alignment, as it is required by the byval argument (and uses of the
@@ -377,11 +377,11 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
   Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

   Value *Size;
-  if (IFI.TD == 0)
+  if (IFI.DL == 0)
     Size = ConstantExpr::getSizeOf(AggTy);
   else
     Size = ConstantInt::get(Type::getInt64Ty(Context),
-                            IFI.TD->getTypeStoreSize(AggTy));
+                            IFI.DL->getTypeStoreSize(AggTy));

   // Always generate a memcpy of alignment 1 here because we don't know
   // the alignment of the src pointer.  Other optimizations can infer
@@ -599,7 +599,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   // happy with whatever the cloner can do.
   CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                             /*ModuleLevelChanges=*/false, Returns, ".i",
-                            &InlinedFunctionInfo, IFI.TD, TheCall);
+                            &InlinedFunctionInfo, IFI.DL, TheCall);

   // Remember the first block that is newly cloned over.
   FirstNewBlock = LastBlock; ++FirstNewBlock;
@@ -669,9 +669,9 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
       ConstantInt *AllocaSize = 0;
       if (ConstantInt *AIArraySize =
           dyn_cast<ConstantInt>(AI->getArraySize())) {
-        if (IFI.TD) {
+        if (IFI.DL) {
           Type *AllocaType = AI->getAllocatedType();
-          uint64_t AllocaTypeSize = IFI.TD->getTypeAllocSize(AllocaType);
+          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
           uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
           assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
           // Check that array size doesn't saturate uint64_t and doesn't
@@ -908,7 +908,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   // the entries are the same or undef).  If so, remove the PHI so it doesn't
   // block other optimizations.
   if (PHI) {
-    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
+    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
       PHI->replaceAllUsesWith(V);
       PHI->eraseFromParent();
     }
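
At this point in LLVM's history `DataLayout` was itself an `ImmutablePass`, which is why nearly every file above fetches it with `getAnalysisIfAvailable<DataLayout>()` and then null-checks it before use. A sketch of that idiom in a hypothetical function pass (the pass name and structure here are illustrative, not from the commit):

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

namespace {
// Hypothetical pass showing the era's nullable-DataLayout idiom.
struct ExamplePass : public llvm::FunctionPass {
  static char ID;
  const llvm::DataLayout *DL; // stays null if no target data is registered

  ExamplePass() : llvm::FunctionPass(ID), DL(0) {}

  virtual bool runOnFunction(llvm::Function &F) {
    // DataLayout may be absent from the pass manager, so the result can
    // be null and every later use has to guard against that, e.g.
    // "DL ? DL->getTypeStoreSize(Ty) : UnknownSize" as in the diff above.
    DL = getAnalysisIfAvailable<llvm::DataLayout>();
    return false; // analysis only; the IR is not modified
  }
};
}

char ExamplePass::ID = 0;
```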