author     Chandler Carruth <chandlerc@gmail.com>    2011-12-12 11:59:10 +0000
committer  Chandler Carruth <chandlerc@gmail.com>    2011-12-12 11:59:10 +0000
commit     ddbc274169ed4ee0e0ac32ed194b925a180202fe (patch)
tree       a15e94ed378c7e90d4b6985af905b07f82ad8a36
parent     2106badea341062643d4e11d6e9975b871fa61b9 (diff)
download   llvm-ddbc274169ed4ee0e0ac32ed194b925a180202fe.tar.gz
Manually upgrade the test suite to specify the flag to cttz and ctlz.
I followed three heuristics for deciding whether to set 'true' or
'false':
- Everything target independent got 'true' as that is the expected
common output of the GCC builtins.
- If the target arch only has one way of implementing this operation,
set the flag in the way that exercises the most codegen. For most
architectures this is also the likely path from a GCC builtin, with
'true' being set. It will (eventually) require lowering away that
difference, and then lowering to the architecture's operation.
- Otherwise, set the flag differently depending on which target
operation should be tested.
Let me know if anyone has any issue with this pattern or would like
specific tests of another form. This should allow the x86 codegen to
just iteratively improve as I teach the backend how to differentiate
between the two forms, and everything else should remain exactly the
same.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146370 91177308-0d34-0410-b5e6-96231b3b80d8
22 files changed, 107 insertions, 107 deletions
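For context on the flag being added throughout the diff below: the new second argument to llvm.cttz/llvm.ctlz is the "is zero undefined" bit. The sketch that follows is illustrative only and is not taken from this commit; the function names are invented. Passing 'true' makes the result undefined when the input is zero, matching the contract of GCC's __builtin_ctz/__builtin_clz, while 'false' requires a fully defined result (the operand's bit width for a zero input).

; Hypothetical sketch of the two-argument form (function names invented).
declare i32 @llvm.cttz.i32(i32, i1)
declare i32 @llvm.ctlz.i32(i32, i1)

define i32 @ctz_like_gcc_builtin(i32 %x) {
  ; 'true': the result is undefined when %x is 0, mirroring __builtin_ctz.
  %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
  ret i32 %r
}

define i32 @clz_defined_at_zero(i32 %x) {
  ; 'false': the result must be 32 when %x is 0, the semantics of
  ; zero-defined instructions such as LZCNT.
  %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  ret i32 %r
}

This is why the x86 bmi.ll and lzcnt.ll tests in the diff pass 'i1 false' (TZCNT and LZCNT are defined at zero), while most other tests pass 'i1 true'.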
diff --git a/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll b/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
index 035299e0ac82..5b81c17d43a3 100644
--- a/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
+++ b/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
@@ -3,12 +3,12 @@
 ; RUN: grep {ret i32 0}
 ; END.
-declare i16 @llvm.cttz.i16(i16)
+declare i16 @llvm.cttz.i16(i16, i1)
 define i32 @test(i32* %P, i16* %Q) {
   %A = load i16* %Q   ; <i16> [#uses=1]
   %x = load i32* %P   ; <i32> [#uses=1]
-  %B = call i16 @llvm.cttz.i16( i16 %A )   ; <i16> [#uses=1]
+  %B = call i16 @llvm.cttz.i16( i16 %A, i1 true )   ; <i16> [#uses=1]
   %y = load i32* %P   ; <i32> [#uses=1]
   store i16 %B, i16* %Q
   %z = sub i32 %x, %y   ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/clz.ll b/test/CodeGen/ARM/clz.ll
index e381e0029819..5b6a584bbee8 100644
--- a/test/CodeGen/ARM/clz.ll
+++ b/test/CodeGen/ARM/clz.ll
@@ -1,10 +1,10 @@
 ; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s
-declare i32 @llvm.ctlz.i32(i32)
+declare i32 @llvm.ctlz.i32(i32, i1)
 define i32 @test(i32 %x) {
 ; CHECK: test
 ; CHECK: clz r0, r0
-  %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x )
+  %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x, i1 true )
   ret i32 %tmp.1
 }
diff --git a/test/CodeGen/ARM/ctz.ll b/test/CodeGen/ARM/ctz.ll
index 1d2ced37b035..5ebca53b4692 100644
--- a/test/CodeGen/ARM/ctz.ll
+++ b/test/CodeGen/ARM/ctz.ll
@@ -1,11 +1,11 @@
 ; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.cttz.i32(i32, i1)
 define i32 @f1(i32 %a) {
 ; CHECK: f1:
 ; CHECK: rbit
 ; CHECK: clz
-  %tmp = call i32 @llvm.cttz.i32( i32 %a )
+  %tmp = call i32 @llvm.cttz.i32( i32 %a, i1 true )
   ret i32 %tmp
 }
diff --git a/test/CodeGen/ARM/fold-const.ll b/test/CodeGen/ARM/fold-const.ll
index 227e4e8aaa24..1ba561dd70b0 100644
--- a/test/CodeGen/ARM/fold-const.ll
+++ b/test/CodeGen/ARM/fold-const.ll
@@ -3,7 +3,7 @@
 define i32 @f(i32 %a) nounwind readnone optsize ssp {
 entry:
   %conv = zext i32 %a to i64
-  %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %conv)
+  %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %conv, i1 true)
 ; CHECK: clz
 ; CHECK-NOT: adds
   %cast = trunc i64 %tmp1 to i32
@@ -11,4 +11,4 @@ entry:
   ret i32 %sub
 }
-declare i64 @llvm.ctlz.i64(i64) nounwind readnone
+declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
diff --git a/test/CodeGen/Generic/2008-02-04-Ctlz.ll b/test/CodeGen/Generic/2008-02-04-Ctlz.ll
index 288bfd245da9..9f102066f2bb 100644
--- a/test/CodeGen/Generic/2008-02-04-Ctlz.ll
+++ b/test/CodeGen/Generic/2008-02-04-Ctlz.ll
@@ -4,8 +4,8 @@
 define i32 @main(i64 %arg) nounwind {
 entry:
-  %tmp37 = tail call i64 @llvm.ctlz.i64( i64 %arg )   ; <i64> [#uses=1]
-  %tmp47 = tail call i64 @llvm.cttz.i64( i64 %arg )   ; <i64> [#uses=1]
+  %tmp37 = tail call i64 @llvm.ctlz.i64( i64 %arg, i1 true )   ; <i64> [#uses=1]
+  %tmp47 = tail call i64 @llvm.cttz.i64( i64 %arg, i1 true )   ; <i64> [#uses=1]
   %tmp57 = tail call i64 @llvm.ctpop.i64( i64 %arg )   ; <i64> [#uses=1]
   %tmp38 = trunc i64 %tmp37 to i32   ; <i32>:0 [#uses=1]
   %tmp48 = trunc i64 %tmp47 to i32   ; <i32>:0 [#uses=1]
@@ -16,6 +16,6 @@ entry:
 declare i32 @printf(i8* noalias , ...) nounwind
-declare i64 @llvm.ctlz.i64(i64) nounwind readnone
-declare i64 @llvm.cttz.i64(i64) nounwind readnone
+declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
+declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone
 declare i64 @llvm.ctpop.i64(i64) nounwind readnone
diff --git a/test/CodeGen/Generic/llvm-ct-intrinsics.ll b/test/CodeGen/Generic/llvm-ct-intrinsics.ll
index 1db75497592f..abcdb9bbbc14 100644
--- a/test/CodeGen/Generic/llvm-ct-intrinsics.ll
+++ b/test/CodeGen/Generic/llvm-ct-intrinsics.ll
@@ -21,19 +21,19 @@ define void @ctpoptest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %C
   ret void
 }
-declare i64 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64, i1)
-declare i32 @llvm.ctlz.i32(i32)
+declare i32 @llvm.ctlz.i32(i32, i1)
-declare i16 @llvm.ctlz.i16(i16)
+declare i16 @llvm.ctlz.i16(i16, i1)
-declare i8 @llvm.ctlz.i8(i8)
+declare i8 @llvm.ctlz.i8(i8, i1)
 define void @ctlztest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %CP, i64* %DP) {
-  %a = call i8 @llvm.ctlz.i8( i8 %A )   ; <i8> [#uses=1]
-  %b = call i16 @llvm.ctlz.i16( i16 %B )   ; <i16> [#uses=1]
-  %c = call i32 @llvm.ctlz.i32( i32 %C )   ; <i32> [#uses=1]
-  %d = call i64 @llvm.ctlz.i64( i64 %D )   ; <i64> [#uses=1]
+  %a = call i8 @llvm.ctlz.i8( i8 %A, i1 true )   ; <i8> [#uses=1]
+  %b = call i16 @llvm.ctlz.i16( i16 %B, i1 true )   ; <i16> [#uses=1]
+  %c = call i32 @llvm.ctlz.i32( i32 %C, i1 true )   ; <i32> [#uses=1]
+  %d = call i64 @llvm.ctlz.i64( i64 %D, i1 true )   ; <i64> [#uses=1]
   store i8 %a, i8* %AP
   store i16 %b, i16* %BP
   store i32 %c, i32* %CP
@@ -41,19 +41,19 @@ define void @ctlztest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %CP
   ret void
 }
-declare i64 @llvm.cttz.i64(i64)
+declare i64 @llvm.cttz.i64(i64, i1)
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.cttz.i32(i32, i1)
-declare i16 @llvm.cttz.i16(i16)
+declare i16 @llvm.cttz.i16(i16, i1)
-declare i8 @llvm.cttz.i8(i8)
+declare i8 @llvm.cttz.i8(i8, i1)
 define void @cttztest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %CP, i64* %DP) {
-  %a = call i8 @llvm.cttz.i8( i8 %A )   ; <i8> [#uses=1]
-  %b = call i16 @llvm.cttz.i16( i16 %B )   ; <i16> [#uses=1]
-  %c = call i32 @llvm.cttz.i32( i32 %C )   ; <i32> [#uses=1]
-  %d = call i64 @llvm.cttz.i64( i64 %D )   ; <i64> [#uses=1]
+  %a = call i8 @llvm.cttz.i8( i8 %A, i1 true )   ; <i8> [#uses=1]
+  %b = call i16 @llvm.cttz.i16( i16 %B, i1 true )   ; <i16> [#uses=1]
+  %c = call i32 @llvm.cttz.i32( i32 %C, i1 true )   ; <i32> [#uses=1]
+  %d = call i64 @llvm.cttz.i64( i64 %D, i1 true )   ; <i64> [#uses=1]
   store i8 %a, i8* %AP
   store i16 %b, i16* %BP
   store i32 %c, i32* %CP
diff --git a/test/CodeGen/Mips/2008-08-08-ctlz.ll b/test/CodeGen/Mips/2008-08-08-ctlz.ll
index 522018365269..abd61de5a8d8 100644
--- a/test/CodeGen/Mips/2008-08-08-ctlz.ll
+++ b/test/CodeGen/Mips/2008-08-08-ctlz.ll
@@ -3,8 +3,8 @@
 define i32 @A0(i32 %u) nounwind {
 entry:
 ; CHECK: clz
-  call i32 @llvm.ctlz.i32( i32 %u )
+  call i32 @llvm.ctlz.i32( i32 %u, i1 true )
   ret i32 %0
 }
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
diff --git a/test/CodeGen/Mips/2010-11-09-CountLeading.ll b/test/CodeGen/Mips/2010-11-09-CountLeading.ll
index c592b311782f..6174500d3e0b 100644
--- a/test/CodeGen/Mips/2010-11-09-CountLeading.ll
+++ b/test/CodeGen/Mips/2010-11-09-CountLeading.ll
@@ -3,16 +3,16 @@
 ; CHECK: clz $2, $4
 define i32 @t1(i32 %X) nounwind readnone {
 entry:
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X)
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
   ret i32 %tmp1
 }
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
 ; CHECK: clz $2, $4
 define i32 @t2(i32 %X) nounwind readnone {
 entry:
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X)
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %X, i1 true)
   ret i32 %tmp1
 }
@@ -20,7 +20,7 @@ entry:
 define i32 @t3(i32 %X) nounwind readnone {
 entry:
   %neg = xor i32 %X, -1
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg)
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
   ret i32 %tmp1
 }
@@ -28,6 +28,6 @@ entry:
 define i32 @t4(i32 %X) nounwind readnone {
 entry:
   %neg = xor i32 %X, -1
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg)
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %neg, i1 true)
   ret i32 %tmp1
 }
diff --git a/test/CodeGen/Mips/mips64instrs.ll b/test/CodeGen/Mips/mips64instrs.ll
index 9bc178c63d4f..041831149057 100644
--- a/test/CodeGen/Mips/mips64instrs.ll
+++ b/test/CodeGen/Mips/mips64instrs.ll
@@ -116,12 +116,12 @@ entry:
   ret i64 %rem
 }
-declare i64 @llvm.ctlz.i64(i64) nounwind readnone
+declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
 define i64 @f18(i64 %X) nounwind readnone {
 entry:
 ; CHECK: dclz $2, $4
-  %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X)
+  %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %X, i1 true)
   ret i64 %tmp1
 }
@@ -129,7 +129,7 @@
 define i64 @f19(i64 %X) nounwind readnone {
 entry:
 ; CHECK: dclo $2, $4
   %neg = xor i64 %X, -1
-  %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg)
+  %tmp1 = tail call i64 @llvm.ctlz.i64(i64 %neg, i1 true)
   ret i64 %tmp1
 }
diff --git a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
index cca9e658ad5f..3620b0e6340a 100644
--- a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
+++ b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
@@ -2,11 +2,11 @@
 define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) nounwind {
   %tmp19 = load i64* %t
-  %tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19 )   ; <i64> [#uses=1]
+  %tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19, i1 true )   ; <i64> [#uses=1]
   %tmp23 = trunc i64 %tmp22 to i32
   %tmp89 = add i32 %tmp23, -64   ; <i32> [#uses=1]
   %tmp90 = add i32 %tmp89, 0   ; <i32> [#uses=1]
   ret i32 %tmp90
 }
-declare i64 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64, i1)
diff --git a/test/CodeGen/PowerPC/cttz.ll b/test/CodeGen/PowerPC/cttz.ll
index ab493a068a32..1d365d47a877 100644
--- a/test/CodeGen/PowerPC/cttz.ll
+++ b/test/CodeGen/PowerPC/cttz.ll
@@ -1,11 +1,11 @@
 ; Make sure this testcase does not use ctpop
 ; RUN: llc < %s -march=ppc32 | grep -i cntlzw
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.cttz.i32(i32, i1)
 define i32 @bar(i32 %x) {
 entry:
-  %tmp.1 = call i32 @llvm.cttz.i32( i32 %x )   ; <i32> [#uses=1]
+  %tmp.1 = call i32 @llvm.cttz.i32( i32 %x, i1 true )   ; <i32> [#uses=1]
   ret i32 %tmp.1
 }
diff --git a/test/CodeGen/Thumb2/thumb2-clz.ll b/test/CodeGen/Thumb2/thumb2-clz.ll
index 00a54a0f1952..f7e966535d2f 100644
--- a/test/CodeGen/Thumb2/thumb2-clz.ll
+++ b/test/CodeGen/Thumb2/thumb2-clz.ll
@@ -3,8 +3,8 @@
 define i32 @f1(i32 %a) {
 ; CHECK: f1:
 ; CHECK: clz r
-  %tmp = tail call i32 @llvm.ctlz.i32(i32 %a)
+  %tmp = tail call i32 @llvm.ctlz.i32(i32 %a, i1 true)
   ret i32 %tmp
 }
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index 69cf7365c507..cde9b4884dac 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -1,40 +1,40 @@
 ; RUN: llc < %s -march=x86-64 -mattr=+bmi,+bmi2 | FileCheck %s
 define i32 @t1(i32 %x) nounwind {
-  %tmp = tail call i32 @llvm.cttz.i32( i32 %x )
+  %tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 false )
   ret i32 %tmp
 ; CHECK: t1:
 ; CHECK: tzcntl
 }
-declare i32 @llvm.cttz.i32(i32) nounwind readnone
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
 define i16 @t2(i16 %x) nounwind {
-  %tmp = tail call i16 @llvm.cttz.i16( i16 %x )
+  %tmp = tail call i16 @llvm.cttz.i16( i16 %x, i1 false )
   ret i16 %tmp
 ; CHECK: t2:
 ; CHECK: tzcntw
 }
-declare i16 @llvm.cttz.i16(i16) nounwind readnone
+declare i16 @llvm.cttz.i16(i16, i1) nounwind readnone
 define i64 @t3(i64 %x) nounwind {
-  %tmp = tail call i64 @llvm.cttz.i64( i64 %x )
+  %tmp = tail call i64 @llvm.cttz.i64( i64 %x, i1 false )
   ret i64 %tmp
 ; CHECK: t3:
 ; CHECK: tzcntq
 }
-declare i64 @llvm.cttz.i64(i64) nounwind readnone
+declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone
 define i8 @t4(i8 %x) nounwind {
-  %tmp = tail call i8 @llvm.cttz.i8( i8 %x )
+  %tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 false )
  ret i8 %tmp
 ; CHECK: t4:
 ; CHECK: tzcntw
 }
-declare i8 @llvm.cttz.i8(i8) nounwind readnone
+declare i8 @llvm.cttz.i8(i8, i1) nounwind readnone
 define i32 @andn32(i32 %x, i32 %y) nounwind readnone {
   %tmp1 = xor i32 %x, -1
diff --git a/test/CodeGen/X86/clz.ll b/test/CodeGen/X86/clz.ll
index d76fab4123bd..9b26efd10dea 100644
--- a/test/CodeGen/X86/clz.ll
+++ b/test/CodeGen/X86/clz.ll
@@ -1,36 +1,36 @@
 ; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck %s
 define i32 @t1(i32 %x) nounwind {
-  %tmp = tail call i32 @llvm.ctlz.i32( i32 %x )
+  %tmp = tail call i32 @llvm.ctlz.i32( i32 %x, i1 true )
   ret i32 %tmp
 ; CHECK: t1:
 ; CHECK: bsrl
 ; CHECK: cmov
 }
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
 define i32 @t2(i32 %x) nounwind {
-  %tmp = tail call i32 @llvm.cttz.i32( i32 %x )
+  %tmp = tail call i32 @llvm.cttz.i32( i32 %x, i1 true )
   ret i32 %tmp
 ; CHECK: t2:
 ; CHECK: bsfl
 ; CHECK: cmov
 }
-declare i32 @llvm.cttz.i32(i32) nounwind readnone
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
 define i16 @t3(i16 %x, i16 %y) nounwind {
 entry:
   %tmp1 = add i16 %x, %y
-  %tmp2 = tail call i16 @llvm.ctlz.i16( i16 %tmp1 )   ; <i16> [#uses=1]
+  %tmp2 = tail call i16 @llvm.ctlz.i16( i16 %tmp1, i1 true )   ; <i16> [#uses=1]
   ret i16 %tmp2
 ; CHECK: t3:
 ; CHECK: bsrw
 ; CHECK: cmov
 }
-declare i16 @llvm.ctlz.i16(i16) nounwind readnone
+declare i16 @llvm.ctlz.i16(i16, i1) nounwind readnone
 ; Don't generate the cmovne when the source is known non-zero (and bsr would
 ; not set ZF).
@@ -43,6 +43,6 @@ entry:
 ; CHECK-NOT: cmov
 ; CHECK: ret
   %or = or i32 %n, 1
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %or)
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %or, i1 true)
   ret i32 %tmp1
 }
diff --git a/test/CodeGen/X86/lzcnt.ll b/test/CodeGen/X86/lzcnt.ll
index e5a55abf1ab7..adfc38b35edb 100644
--- a/test/CodeGen/X86/lzcnt.ll
+++ b/test/CodeGen/X86/lzcnt.ll
@@ -1,38 +1,38 @@
 ; RUN: llc < %s -march=x86-64 -mattr=+lzcnt | FileCheck %s
 define i32 @t1(i32 %x) nounwind {
-  %tmp = tail call i32 @llvm.ctlz.i32( i32 %x )
+  %tmp = tail call i32 @llvm.ctlz.i32( i32 %x, i1 false )
   ret i32 %tmp
 ; CHECK: t1:
 ; CHECK: lzcntl
 }
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
 define i16 @t2(i16 %x) nounwind {
-  %tmp = tail call i16 @llvm.ctlz.i16( i16 %x )
+  %tmp = tail call i16 @llvm.ctlz.i16( i16 %x, i1 false )
   ret i16 %tmp
 ; CHECK: t2:
 ; CHECK: lzcntw
 }
-declare i16 @llvm.ctlz.i16(i16) nounwind readnone
+declare i16 @llvm.ctlz.i16(i16, i1) nounwind readnone
 define i64 @t3(i64 %x) nounwind {
-  %tmp = tail call i64 @llvm.ctlz.i64( i64 %x )
+  %tmp = tail call i64 @llvm.ctlz.i64( i64 %x, i1 false )
   ret i64 %tmp
 ; CHECK: t3:
 ; CHECK: lzcntq
 }
-declare i64 @llvm.ctlz.i64(i64) nounwind readnone
+declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
 define i8 @t4(i8 %x) nounwind {
-  %tmp = tail call i8 @llvm.ctlz.i8( i8 %x )
+  %tmp = tail call i8 @llvm.ctlz.i8( i8 %x, i1 false )
   ret i8 %tmp
 ; CHECK: t4:
 ; CHECK: lzcntw
 }
-declare i8 @llvm.ctlz.i8(i8) nounwind readnone
+declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
diff --git a/test/CodeGen/X86/vec_ctbits.ll b/test/CodeGen/X86/vec_ctbits.ll
index f0158d643c17..bddd53514643 100644
--- a/test/CodeGen/X86/vec_ctbits.ll
+++ b/test/CodeGen/X86/vec_ctbits.ll
@@ -1,15 +1,15 @@
 ; RUN: llc < %s -march=x86-64
-declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
-declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
+declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
 define <2 x i64> @footz(<2 x i64> %a) nounwind {
-  %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
+  %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 true)
   ret <2 x i64> %c
 }
 define <2 x i64> @foolz(<2 x i64> %a) nounwind {
-  %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
+  %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 true)
   ret <2 x i64> %c
 }
 define <2 x i64> @foopop(<2 x i64> %a) nounwind {
diff --git a/test/Feature/intrinsics.ll b/test/Feature/intrinsics.ll
index fc13d5a63158..c4e3db6174a6 100644
--- a/test/Feature/intrinsics.ll
+++ b/test/Feature/intrinsics.ll
@@ -15,21 +15,21 @@ declare i32 @llvm.ctpop.i32(i32)
 declare i64 @llvm.ctpop.i64(i64)
-declare i8 @llvm.cttz.i8(i8)
+declare i8 @llvm.cttz.i8(i8, i1)
-declare i16 @llvm.cttz.i16(i16)
+declare i16 @llvm.cttz.i16(i16, i1)
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.cttz.i32(i32, i1)
-declare i64 @llvm.cttz.i64(i64)
+declare i64 @llvm.cttz.i64(i64, i1)
-declare i8 @llvm.ctlz.i8(i8)
+declare i8 @llvm.ctlz.i8(i8, i1)
-declare i16 @llvm.ctlz.i16(i16)
+declare i16 @llvm.ctlz.i16(i16, i1)
-declare i32 @llvm.ctlz.i32(i32)
+declare i32 @llvm.ctlz.i32(i32, i1)
-declare i64 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64, i1)
 declare float @llvm.sqrt.f32(float)
@@ -46,14 +46,14 @@ define void @libm() {
   call i16 @llvm.ctpop.i16( i16 11 )   ; <i32>:6 [#uses=0]
   call i32 @llvm.ctpop.i32( i32 12 )   ; <i32>:7 [#uses=0]
   call i64 @llvm.ctpop.i64( i64 13 )   ; <i32>:8 [#uses=0]
-  call i8 @llvm.ctlz.i8( i8 14 )   ; <i32>:9 [#uses=0]
-  call i16 @llvm.ctlz.i16( i16 15 )   ; <i32>:10 [#uses=0]
-  call i32 @llvm.ctlz.i32( i32 16 )   ; <i32>:11 [#uses=0]
-  call i64 @llvm.ctlz.i64( i64 17 )   ; <i32>:12 [#uses=0]
-  call i8 @llvm.cttz.i8( i8 18 )   ; <i32>:13 [#uses=0]
-  call i16 @llvm.cttz.i16( i16 19 )   ; <i32>:14 [#uses=0]
-  call i32 @llvm.cttz.i32( i32 20 )   ; <i32>:15 [#uses=0]
-  call i64 @llvm.cttz.i64( i64 21 )   ; <i32>:16 [#uses=0]
+  call i8 @llvm.ctlz.i8( i8 14, i1 true )   ; <i32>:9 [#uses=0]
+  call i16 @llvm.ctlz.i16( i16 15, i1 true )   ; <i32>:10 [#uses=0]
+  call i32 @llvm.ctlz.i32( i32 16, i1 true )   ; <i32>:11 [#uses=0]
+  call i64 @llvm.ctlz.i64( i64 17, i1 true )   ; <i32>:12 [#uses=0]
+  call i8 @llvm.cttz.i8( i8 18, i1 true )   ; <i32>:13 [#uses=0]
+  call i16 @llvm.cttz.i16( i16 19, i1 true )   ; <i32>:14 [#uses=0]
+  call i32 @llvm.cttz.i32( i32 20, i1 true )   ; <i32>:15 [#uses=0]
+  call i64 @llvm.cttz.i64( i64 21, i1 true )   ; <i32>:16 [#uses=0]
   ret void
 }
diff --git a/test/Transforms/ConstProp/2007-11-23-cttz.ll b/test/Transforms/ConstProp/2007-11-23-cttz.ll
index 37cda303713b..a28c9b0a2f13 100644
--- a/test/Transforms/ConstProp/2007-11-23-cttz.ll
+++ b/test/Transforms/ConstProp/2007-11-23-cttz.ll
@@ -1,8 +1,8 @@
 ; RUN: opt < %s -constprop -S | grep {ret i13 13}
 ; PR1816
-declare i13 @llvm.cttz.i13(i13)
+declare i13 @llvm.cttz.i13(i13, i1)
 define i13 @test() {
-  %X = call i13 @llvm.cttz.i13(i13 0)
+  %X = call i13 @llvm.cttz.i13(i13 0, i1 true)
   ret i13 %X
 }
diff --git a/test/Transforms/InstCombine/bitcount.ll b/test/Transforms/InstCombine/bitcount.ll
index f75ca2df69d1..a6fd83742c28 100644
--- a/test/Transforms/InstCombine/bitcount.ll
+++ b/test/Transforms/InstCombine/bitcount.ll
@@ -4,13 +4,13 @@
 ; RUN: grep -v declare | not grep llvm.ct
 declare i31 @llvm.ctpop.i31(i31 %val)
-declare i32 @llvm.cttz.i32(i32 %val)
-declare i33 @llvm.ctlz.i33(i33 %val)
+declare i32 @llvm.cttz.i32(i32 %val, i1)
+declare i33 @llvm.ctlz.i33(i33 %val, i1)
 define i32 @test(i32 %A) {
   %c1 = call i31 @llvm.ctpop.i31(i31 12415124)
-  %c2 = call i32 @llvm.cttz.i32(i32 87359874)
-  %c3 = call i33 @llvm.ctlz.i33(i33 87359874)
+  %c2 = call i32 @llvm.cttz.i32(i32 87359874, i1 true)
+  %c3 = call i33 @llvm.ctlz.i33(i33 87359874, i1 true)
   %t1 = zext i31 %c1 to i32
   %t3 = trunc i33 %c3 to i32
   %r1 = add i32 %t1, %c2
diff --git a/test/Transforms/InstCombine/intrinsics.ll b/test/Transforms/InstCombine/intrinsics.ll
index fb57a190aaba..e31bd7dfee08 100644
--- a/test/Transforms/InstCombine/intrinsics.ll
+++ b/test/Transforms/InstCombine/intrinsics.ll
@@ -5,10 +5,10 @@
 declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
 declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
 declare double @llvm.powi.f64(double, i32) nounwind readonly
-declare i32 @llvm.cttz.i32(i32) nounwind readnone
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
 declare i32 @llvm.ctpop.i32(i32) nounwind readnone
-declare i8 @llvm.ctlz.i8(i8) nounwind readnone
+declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
 define i8 @uaddtest1(i8 %A, i8 %B) {
   %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
@@ -161,7 +161,7 @@ define i32 @cttz(i32 %a) {
 entry:
   %or = or i32 %a, 8
   %and = and i32 %or, -8
-  %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
+  %count = tail call i32 @llvm.cttz.i32(i32 %and, i1 true) nounwind readnone
   ret i32 %count
 ; CHECK: @cttz
 ; CHECK-NEXT: entry:
@@ -172,7 +172,7 @@ define i8 @ctlz(i8 %a) {
 entry:
   %or = or i8 %a, 32
   %and = and i8 %or, 63
-  %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
+  %count = tail call i8 @llvm.ctlz.i8(i8 %and, i1 true) nounwind readnone
   ret i8 %count
 ; CHECK: @ctlz
 ; CHECK-NEXT: entry:
@@ -181,10 +181,10 @@ entry:
 define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
 entry:
-  %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
+  %lz = tail call i32 @llvm.ctlz.i32(i32 %a, i1 true) nounwind readnone
   %lz.cmp = icmp eq i32 %lz, 32
   store volatile i1 %lz.cmp, i1* %c
-  %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
+  %tz = tail call i32 @llvm.cttz.i32(i32 %a, i1 true) nounwind readnone
   %tz.cmp = icmp ne i32 %tz, 32
   store volatile i1 %tz.cmp, i1* %c
   %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
@@ -203,7 +203,7 @@ entry:
 define i32 @cttz_simplify1(i32 %x) nounwind readnone ssp {
-  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x)   ; <i32> [#uses=1]
+  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)   ; <i32> [#uses=1]
   %shr3 = lshr i32 %tmp1, 5   ; <i32> [#uses=1]
   ret i32 %shr3
diff --git a/test/Transforms/InstCombine/sext.ll b/test/Transforms/InstCombine/sext.ll
index f49a2efb39d8..f1987973f462 100644
--- a/test/Transforms/InstCombine/sext.ll
+++ b/test/Transforms/InstCombine/sext.ll
@@ -3,8 +3,8 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
 declare i32 @llvm.ctpop.i32(i32)
-declare i32 @llvm.ctlz.i32(i32)
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
 define i64 @test1(i32 %x) {
   %t = call i32 @llvm.ctpop.i32(i32 %x)
@@ -16,7 +16,7 @@ define i64 @test1(i32 %x) {
 }
 define i64 @test2(i32 %x) {
-  %t = call i32 @llvm.ctlz.i32(i32 %x)
+  %t = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
   %s = sext i32 %t to i64
   ret i64 %s
@@ -25,7 +25,7 @@ define i64 @test2(i32 %x) {
 }
 define i64 @test3(i32 %x) {
-  %t = call i32 @llvm.cttz.i32(i32 %x)
+  %t = call i32 @llvm.cttz.i32(i32 %x, i1 true)
   %s = sext i32 %t to i64
   ret i64 %s
diff --git a/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll b/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll
index cd6cf9704a58..63f41dbc0240 100644
--- a/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll
+++ b/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll
@@ -6,9 +6,9 @@
 target triple = "i686-pc-linux-gnu"
 define i32 @x(i32 %b) {
 entry:
-  %val = call i32 @llvm.cttz.i32(i32 undef)
+  %val = call i32 @llvm.cttz.i32(i32 undef, i1 true)
   ret i32 %val
 }
-declare i32 @llvm.cttz.i32(i32)
+declare i32 @llvm.cttz.i32(i32, i1)