summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPeter Waller <peter.waller@arm.com>2021-10-28 12:14:52 +0000
committerPeter Waller <peter.waller@arm.com>2021-10-28 12:15:15 +0000
commit98f08752f76b80a509037986d620b402c48f5115 (patch)
tree1c86a3c2beed1bf9c5ade965a024f72f8703e20b
parent0a2708d2ae572b394ebe35b66ebc58143b9f40bf (diff)
downloadllvm-98f08752f76b80a509037986d620b402c48f5115.tar.gz
[InstCombine][ConstantFolding] Make ConstantFoldLoadThroughBitcast TypeSize-aware
The newly added test previously caused the compiler to fail an assertion. It looks like a straightforward TypeSize upgrade. Reviewed By: paulwalker-arm Differential Revision: https://reviews.llvm.org/D112142
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp6
-rw-r--r--llvm/test/Transforms/InstCombine/vscale_load.ll27
2 files changed, 30 insertions, 3 deletions
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 4cc3fcd149d6..3ed3b8902343 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -352,9 +352,9 @@ Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
const DataLayout &DL) {
do {
Type *SrcTy = C->getType();
- uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
- uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
- if (SrcSize < DestSize)
+ TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
+ TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
+ if (!TypeSize::isKnownGE(SrcSize, DestSize))
return nullptr;
// Catch the obvious splat cases (since all-zeros can coerce non-integral
diff --git a/llvm/test/Transforms/InstCombine/vscale_load.ll b/llvm/test/Transforms/InstCombine/vscale_load.ll
new file mode 100644
index 000000000000..4041ffc0fb12
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_load.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S -verify | FileCheck %s
+
+define <2 x i8> @constprop_load_bitcast(<vscale x 16 x i1>* %ptr) {
+; CHECK-LABEL: @constprop_load_bitcast(
+; CHECK-NEXT: store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* [[PTR:%.*]], align 16
+; CHECK-NEXT: ret <2 x i8> zeroinitializer
+;
+ store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* %ptr, align 16
+ %cast_to_fixed = bitcast <vscale x 16 x i1>* %ptr to <2 x i8>*
+ %a = load <2 x i8>, <2 x i8>* %cast_to_fixed, align 16
+ ret <2 x i8> %a
+}
+
+; vscale-sized vec not guaranteed to fill destination.
+define <8 x i8> @constprop_load_bitcast_neg(<vscale x 16 x i1>* %ptr) {
+; CHECK-LABEL: @constprop_load_bitcast_neg(
+; CHECK-NEXT: store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* [[PTR:%.*]], align 16
+; CHECK-NEXT: [[CAST_TO_FIXED:%.*]] = bitcast <vscale x 16 x i1>* [[PTR]] to <8 x i8>*
+; CHECK-NEXT: [[A:%.*]] = load <8 x i8>, <8 x i8>* [[CAST_TO_FIXED]], align 16
+; CHECK-NEXT: ret <8 x i8> [[A]]
+;
+ store <vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1>* %ptr, align 16
+ %cast_to_fixed = bitcast <vscale x 16 x i1>* %ptr to <8 x i8>*
+ %a = load <8 x i8>, <8 x i8>* %cast_to_fixed, align 16
+ ret <8 x i8> %a
+}