author    Nick Desaulniers <ndesaulniers@google.com>  2021-10-08 15:17:54 -0700
committer Tom Stellard <tstellar@redhat.com>          2022-01-11 21:12:29 -0800
commit    52a400d8e4c46876fae5d732c92df254a9ceae8f (patch)
tree      4f754a965d0a04ea3ebe4192df45789f045eb7fb
parent    eaeb7dcf32495155b5d430d2a650a211c9b230af (diff)
[InlineCost] model calls to llvm.is.constant* more carefully
llvm.is.constant* intrinsics evaluate to 0 or 1 integral values.

A common use case for llvm.is.constant comes from the higher-level
__builtin_constant_p. A common usage pattern of __builtin_constant_p in
the Linux kernel is:

    void foo (int bar) {
      if (__builtin_constant_p(bar)) {
        // lots of code that will fold away to a constant.
      } else {
        // a little bit of code, usually a libcall.
      }
    }

A minor issue in InlineCost calculations is that when `bar` is _not_
Constant and still will not be after inlining, we don't discount the
true branch, and the inline cost of `foo` ends up being the cost of both
branches together, rather than just the false branch.

This leads to code like the above where inlining will not help prove
`bar` Constant, but it would still be beneficial to inline `foo`,
because the "true" branch is irrelevant from a cost perspective.

For example, IPSCCP can sink a passed constant argument to `foo`:

    const int x = 42;
    void bar (void) { foo(x); }

This improves our inlining decisions, and fixes a few head-scratching
cases where the disassembly shows a relatively small `foo` not inlined
into a lone caller.

We could further improve this modeling by tracking whether the argument
to llvm.is.constant* is a parameter of the function, and whether
inlining would allow that parameter to become Constant. This idea is
noted in a FIXME comment.

Link: https://github.com/ClangBuiltLinux/linux/issues/1302
Reviewed By: kazu
Differential Revision: https://reviews.llvm.org/D111272

(cherry picked from commit 9697f93587f46300814f1c6c68af347441d6e05d)
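For context: when Clang cannot fold the operand of __builtin_constant_p
itself, it lowers the builtin to a call to this intrinsic. A minimal IR
sketch of the kernel pattern above, assuming that lowering; @expensive
and @cheap are illustrative stand-ins for the two branch bodies, not
names from the patch:

    declare i1 @llvm.is.constant.i32(i32)
    declare void @expensive() ; stands in for the large, foldable branch
    declare void @cheap()     ; stands in for the small fallback branch

    define void @foo(i32 %bar) {
      %p = call i1 @llvm.is.constant.i32(i32 %bar)
      br i1 %p, label %if.then, label %if.else

    if.then:
      call void @expensive()
      ret void

    if.else:
      call void @cheap()
      ret void
    }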
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp                           24
-rw-r--r--  llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll  39
2 files changed, 63 insertions, 0 deletions
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 4c2413e14435..e8f79a28a8e8 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -354,6 +354,7 @@ protected:
bool simplifyCallSite(Function *F, CallBase &Call);
template <typename Callable>
bool simplifyInstruction(Instruction &I, Callable Evaluate);
+ bool simplifyIntrinsicCallIsConstant(CallBase &CB);
ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
/// Return true if the given argument to the function being considered for
@@ -1471,6 +1472,27 @@ bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
return true;
}
+/// Try to simplify a call to llvm.is.constant.
+///
+/// Duplicate the argument checking from CallAnalyzer::simplifyCallSite since
+/// we expect calls of this specific intrinsic to be infrequent.
+///
+/// FIXME: Given that we know CB's parent (F) caller
+/// (CandidateCall->getParent()->getParent()), we might be able to determine
+/// whether inlining F into F's caller would change how the call to
+/// llvm.is.constant would evaluate.
+bool CallAnalyzer::simplifyIntrinsicCallIsConstant(CallBase &CB) {
+ Value *Arg = CB.getArgOperand(0);
+ auto *C = dyn_cast<Constant>(Arg);
+
+ if (!C)
+ C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(Arg));
+
+ Type *RT = CB.getFunctionType()->getReturnType();
+ SimplifiedValues[&CB] = ConstantInt::get(RT, C ? 1 : 0);
+ return true;
+}
+
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
// Propagate constants through bitcasts.
if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
@@ -2091,6 +2113,8 @@ bool CallAnalyzer::visitCallBase(CallBase &Call) {
if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0)))
SROAArgValues[II] = SROAArg;
return true;
+ case Intrinsic::is_constant:
+ return simplifyIntrinsicCallIsConstant(Call);
}
}
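Conceptually, the hunk above means that when the argument is not a known
Constant (directly or via SimplifiedValues), the analyzer records the
intrinsic call as folding to 0, and the branch on it is then costed as
if only the false edge were taken. A hand-written sketch of that
cost-model view of @foo from the commit message; this is not IR the pass
emits, only an illustration of the accounting:

    declare void @expensive()
    declare void @cheap()

    ; The analyzer's view of @foo when %bar is not a known constant:
    define void @foo.cost_view(i32 %bar) {
      ; llvm.is.constant recorded as i1 0 in SimplifiedValues
      br i1 false, label %if.then, label %if.else

    if.then:                 ; dead under the folded condition: not costed
      call void @expensive()
      ret void

    if.else:                 ; only this branch is charged to the inline cost
      call void @cheap()
      ret void
    }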
diff --git a/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll
new file mode 100644
index 000000000000..3c96267a3fd5
--- /dev/null
+++ b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll
@@ -0,0 +1,39 @@
+; RUN: opt %s -passes=inline -inline-threshold=20 -S | FileCheck %s
+
+declare i1 @llvm.is.constant.i64(i64)
+declare void @foo()
+
+define void @callee(i64 %val) {
+ %cond = call i1 @llvm.is.constant.i64(i64 %val)
+ br i1 %cond, label %cond.true, label %cond.false
+
+cond.true:
+; Rack up costs with a couple of function calls so that this function
+; gets inlined only when @llvm.is.constant.i64 is folded. In reality,
+; the "then" clause of __builtin_constant_p tends to have statements
+; that fold very well, so the cost of the "then" clause is not a huge
+; concern.
+ call void @foo()
+ call void @foo()
+ ret void
+
+cond.false:
+ ret void
+}
+
+define void @caller(i64 %val) {
+; CHECK-LABEL: @caller(
+; CHECK-NEXT: [[COND_I:%.*]] = call i1 @llvm.is.constant.i64(i64 [[VAL:%.*]])
+; CHECK-NEXT: br i1 [[COND_I]], label %[[COND_TRUE_I:.*]], label %[[COND_FALSE_I:.*]]
+; CHECK: [[COND_TRUE_I]]:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: br label %[[CALLEE_EXIT:.*]]
+; CHECK: [[COND_FALSE_I]]:
+; CHECK-NEXT: br label %[[CALLEE_EXIT]]
+; CHECK: [[CALLEE_EXIT]]:
+; CHECK-NEXT: ret void
+;
+ call void @callee(i64 %val)
+ ret void
+}
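For contrast, a hypothetical companion caller, not part of the patch:
when the actual argument is a literal, CallAnalyzer seeds
SimplifiedValues with the constant for the formal parameter, so the same
lookup folds the intrinsic to 1 and the true branch is what gets costed;
whether @callee would then still fit under the threshold depends on the
cost of that branch.

    ; Hypothetical: the argument is a literal constant at the call site.
    ; During inline-cost analysis of this call, %val maps to i64 42, so
    ; the llvm.is.constant.i64 call inside @callee is treated as
    ; returning true, and the two @foo calls in %cond.true get costed.
    define void @caller_const() {
      call void @callee(i64 42)
      ret void
    }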