author     Tres Popp <tpopp@google.com>  2023-05-08 16:33:54 +0200
committer  Tres Popp <tpopp@google.com>  2023-05-12 11:21:25 +0200
commit     5550c821897ab77e664977121a0e90ad5be1ff59 (patch)
tree       1947e879997b2fccdb629789362c42310d1f1f84 /mlir/test
parent     5c8ce6d5761ed6a9a39ef5a77aa45d8b6095e0f5 (diff)
download   llvm-5550c821897ab77e664977121a0e90ad5be1ff59.tar.gz
[mlir] Move casting calls from methods to function calls
The MLIR classes Type/Attribute/Operation/Op/Value support
cast/dyn_cast/isa/dyn_cast_or_null functionality through llvm's doCast
functionality in addition to defining methods with the same name. This change
begins the migration of uses of the methods to the corresponding function
calls, which has been decided on as the more consistent style. Note that there
still exist classes that only define the methods directly, such as AffineExpr,
and this change does not currently include work to support a functional
cast/isa call for them.

Caveats include:
- This clang-tidy script probably has more problems.
- This only touches C++ code, so nothing that is being generated.

Context:
- https://mlir.llvm.org/deprecation/ at "Use the free function variants for dyn_cast/cast/isa/…"
- Original discussion at https://discourse.llvm.org/t/preferred-casting-style-going-forward/68443

Implementation:
This first patch was created with the following steps. The intention is to make
only automated changes at first, so that less time is wasted if the change is
reverted, and so that this first mass change serves as a clearer example to
other teams that will need to follow similar steps.

Steps are described per line, as comments are removed by git:
0. Retrieve the change from the following branch to build clang-tidy with an
   additional check:
   https://github.com/llvm/llvm-project/compare/main...tpopp:llvm-project:tidy-cast-check
1. Build clang-tidy.
2. Run clang-tidy over the entire codebase while disabling all checks and
   enabling only the relevant one; run it on all header files as well.
3. Delete the .inc files that were also modified, so the next build rebuilds
   them to a pure state.
4. Some changes were then reverted for the following reasons:
   - Some files had a variable also named cast.
   - Some files had not included a header file that defines the cast functions.
   - Some files are definitions of the classes that have the casting methods,
     so the code still refers to the method instead of the function without
     adding a prefix or removing the method declaration at the same time.

```
ninja -C $BUILD_DIR clang-tidy

run-clang-tidy -clang-tidy-binary=$BUILD_DIR/bin/clang-tidy \
    -checks='-*,misc-cast-functions' \
    -header-filter=mlir/ mlir/* -fix

rm -rf $BUILD_DIR/tools/mlir/**/*.inc

git restore mlir/lib/IR mlir/lib/Dialect/DLTI/DLTI.cpp \
    mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp \
    mlir/lib/**/IR/ \
    mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp \
    mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp \
    mlir/test/lib/Dialect/Test/TestTypes.cpp \
    mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp \
    mlir/test/lib/Dialect/Test/TestAttributes.cpp \
    mlir/unittests/TableGen/EnumsGenTest.cpp \
    mlir/test/python/lib/PythonTestCAPI.cpp \
    mlir/include/mlir/IR/
```

Differential Revision: https://reviews.llvm.org/D150123
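To make the rewrite concrete, here is a minimal sketch of the before/after
styles this check produces. The helper `rankOrZero` is hypothetical and only
assumes the standard MLIR headers; the patch changes the call syntax, not the
behavior:

```cpp
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Value.h"

using namespace mlir;

// Hypothetical helper contrasting the two cast styles on a Value's type.
static int64_t rankOrZero(Value value) {
  // Deprecated method style, as it appears on the removed lines of the
  // hunks below:
  //   auto tensorTy = value.getType().dyn_cast<RankedTensorType>();
  // Free-function style, resolved through llvm's casting infrastructure:
  auto tensorTy = dyn_cast<RankedTensorType>(value.getType());
  return tensorTy ? tensorTy.getRank() : 0;
}
```

The same substitution applies to isa, cast, and dyn_cast_or_null, e.g.
`isa<IndexType>(value.getType())` in place of
`value.getType().isa<IndexType>()`, as the hunks below show.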
Diffstat (limited to 'mlir/test')
-rw-r--r--  mlir/test/lib/Analysis/TestAliasAnalysis.cpp | 8
-rw-r--r--  mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp | 2
-rw-r--r--  mlir/test/lib/Conversion/OneToNTypeConversion/TestOneToNTypeConversionPass.cpp | 8
-rw-r--r--  mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp | 6
-rw-r--r--  mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp | 6
-rw-r--r--  mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp | 4
-rw-r--r--  mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp | 4
-rw-r--r--  mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp | 4
-rw-r--r--  mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp | 2
-rw-r--r--  mlir/test/lib/Dialect/Test/TestDialect.cpp | 36
-rw-r--r--  mlir/test/lib/Dialect/Test/TestPatterns.cpp | 12
-rw-r--r--  mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp | 18
-rw-r--r--  mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp | 10
-rw-r--r--  mlir/test/lib/IR/TestBuiltinAttributeInterfaces.cpp | 2
-rw-r--r--  mlir/test/lib/IR/TestDiagnostics.cpp | 2
-rw-r--r--  mlir/test/lib/IR/TestFunc.cpp | 14
-rw-r--r--  mlir/test/lib/IR/TestInterfaces.cpp | 4
-rw-r--r--  mlir/test/lib/IR/TestOpaqueLoc.cpp | 2
-rw-r--r--  mlir/test/lib/IR/TestPrintDefUse.cpp | 2
-rw-r--r--  mlir/test/lib/Transforms/TestTopologicalSort.cpp | 2
20 files changed, 74 insertions, 74 deletions
diff --git a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
index b563be4a57d4..95c16a6cc589 100644
--- a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
+++ b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
@@ -24,7 +24,7 @@ static void printAliasOperand(Operation *op) {
llvm::errs() << op->getAttrOfType<StringAttr>("test.ptr").getValue();
}
static void printAliasOperand(Value value) {
- if (BlockArgument arg = value.dyn_cast<BlockArgument>()) {
+ if (BlockArgument arg = dyn_cast<BlockArgument>(value)) {
Region *region = arg.getParentRegion();
unsigned parentBlockNumber =
std::distance(region->begin(), arg.getOwner()->getIterator());
@@ -37,7 +37,7 @@ static void printAliasOperand(Value value) {
llvm::errs() << "#" << arg.getArgNumber();
return;
}
- OpResult result = value.cast<OpResult>();
+ OpResult result = cast<OpResult>(value);
printAliasOperand(result.getOwner());
llvm::errs() << "#" << result.getResultNumber();
}
@@ -156,7 +156,7 @@ struct TestAliasAnalysisModRefPass
/// Check if value is function argument.
static bool isFuncArg(Value val) {
- auto blockArg = val.dyn_cast<BlockArgument>();
+ auto blockArg = dyn_cast<BlockArgument>(val);
if (!blockArg)
return false;
@@ -166,7 +166,7 @@ static bool isFuncArg(Value val) {
/// Check if value has "restrict" attribute. Value must be a function argument.
static bool isRestrict(Value val) {
- auto blockArg = val.cast<BlockArgument>();
+ auto blockArg = cast<BlockArgument>(val);
auto func =
mlir::cast<FunctionOpInterface>(blockArg.getOwner()->getParentOp());
return !!func.getArgAttr(blockArg.getArgNumber(),
diff --git a/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp b/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp
index c9e72f844a1f..968e10b8d0ca 100644
--- a/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp
+++ b/mlir/test/lib/Analysis/TestMemRefStrideCalculation.cpp
@@ -32,7 +32,7 @@ struct TestMemRefStrideCalculation
void TestMemRefStrideCalculation::runOnOperation() {
llvm::outs() << "Testing: " << getOperation().getName() << "\n";
getOperation().walk([&](memref::AllocOp allocOp) {
- auto memrefType = allocOp.getResult().getType().cast<MemRefType>();
+ auto memrefType = cast<MemRefType>(allocOp.getResult().getType());
int64_t offset;
SmallVector<int64_t, 4> strides;
if (failed(getStridesAndOffset(memrefType, strides, offset))) {
diff --git a/mlir/test/lib/Conversion/OneToNTypeConversion/TestOneToNTypeConversionPass.cpp b/mlir/test/lib/Conversion/OneToNTypeConversion/TestOneToNTypeConversionPass.cpp
index c3f20989dbd6..e1ccc1b900df 100644
--- a/mlir/test/lib/Conversion/OneToNTypeConversion/TestOneToNTypeConversionPass.cpp
+++ b/mlir/test/lib/Conversion/OneToNTypeConversion/TestOneToNTypeConversionPass.cpp
@@ -102,7 +102,7 @@ public:
matchAndRewrite(::test::GetTupleElementOp op, OpAdaptor adaptor,
OneToNPatternRewriter &rewriter) const override {
// Construct mapping for tuple element types.
- auto stateType = op->getOperand(0).getType().cast<TupleType>();
+ auto stateType = cast<TupleType>(op->getOperand(0).getType());
TypeRange originalElementTypes = stateType.getTypes();
OneToNTypeMapping elementMapping(originalElementTypes);
if (failed(typeConverter->convertSignatureArgs(originalElementTypes,
@@ -148,7 +148,7 @@ static void populateDecomposeTuplesTestPatterns(TypeConverter &typeConverter,
static std::optional<SmallVector<Value>>
buildGetTupleElementOps(OpBuilder &builder, TypeRange resultTypes, Value input,
Location loc) {
- TupleType inputType = input.getType().dyn_cast<TupleType>();
+ TupleType inputType = dyn_cast<TupleType>(input.getType());
if (!inputType)
return {};
@@ -156,7 +156,7 @@ buildGetTupleElementOps(OpBuilder &builder, TypeRange resultTypes, Value input,
for (auto [idx, elementType] : llvm::enumerate(inputType.getTypes())) {
Value element = builder.create<::test::GetTupleElementOp>(
loc, elementType, input, builder.getI32IntegerAttr(idx));
- if (auto nestedTupleType = elementType.dyn_cast<TupleType>()) {
+ if (auto nestedTupleType = dyn_cast<TupleType>(elementType)) {
// Recurse if the current element is also a tuple.
SmallVector<Type> flatRecursiveTypes;
nestedTupleType.getFlattenedTypes(flatRecursiveTypes);
@@ -186,7 +186,7 @@ static std::optional<Value> buildMakeTupleOp(OpBuilder &builder,
elements.reserve(resultType.getTypes().size());
ValueRange::iterator inputIt = inputs.begin();
for (Type elementType : resultType.getTypes()) {
- if (auto nestedTupleType = elementType.dyn_cast<TupleType>()) {
+ if (auto nestedTupleType = dyn_cast<TupleType>(elementType)) {
// Determine how many input values are needed for the nested elements of
// the nested TupleType and advance inputIt by that number.
// TODO: We only need the *number* of nested types, not the types itself.
diff --git a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
index 1bf3ce4ceb32..dff619efda28 100644
--- a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
@@ -81,7 +81,7 @@ static LogicalResult testReifyValueBounds(func::FuncOp funcOp,
return WalkResult::skip();
}
Value value = op->getOperand(0);
- if (value.getType().isa<IndexType>() !=
+ if (isa<IndexType>(value.getType()) !=
!op->hasAttrOfType<IntegerAttr>("dim")) {
// Op should have "dim" attribute if and only if the operand is an
// index-typed value.
@@ -119,7 +119,7 @@ static LogicalResult testReifyValueBounds(func::FuncOp funcOp,
if (reifyToFuncArgs) {
// Reify in terms of function block arguments.
stopCondition = [](Value v, std::optional<int64_t> d) {
- auto bbArg = v.dyn_cast<BlockArgument>();
+ auto bbArg = dyn_cast<BlockArgument>(v);
if (!bbArg)
return false;
return isa<FunctionOpInterface>(
@@ -166,7 +166,7 @@ static LogicalResult testReifyValueBounds(func::FuncOp funcOp,
return WalkResult::skip();
}
Value constOp = rewriter.create<arith::ConstantIndexOp>(
- op->getLoc(), reified->get<Attribute>().cast<IntegerAttr>().getInt());
+ op->getLoc(), cast<IntegerAttr>(reified->get<Attribute>()).getInt());
rewriter.replaceOp(op, constOp);
return WalkResult::skip();
}
diff --git a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
index 85dd0718c9f0..f8588fab3aef 100644
--- a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
@@ -127,7 +127,7 @@ void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) {
// As a consequence we write only Ops with a single return type for the
// purpose of this test. If we need to test more intricate behavior in the
// future we can always extend.
- auto superVectorType = opInst->getResult(0).getType().cast<VectorType>();
+ auto superVectorType = cast<VectorType>(opInst->getResult(0).getType());
auto ratio =
computeShapeRatio(superVectorType.getShape(), subVectorType.getShape());
if (!ratio) {
@@ -211,8 +211,8 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
maps.reserve(matches.size());
for (auto m : llvm::reverse(matches)) {
auto *opInst = m.getMatchedOperation();
- auto map = opInst->getAttr(VectorizerTestPass::kTestAffineMapAttrName)
- .cast<AffineMapAttr>()
+ auto map = cast<AffineMapAttr>(
+ opInst->getAttr(VectorizerTestPass::kTestAffineMapAttrName))
.getValue();
maps.push_back(map);
}
diff --git a/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp b/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
index 41e166600c43..10aba733bd56 100644
--- a/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
+++ b/mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
@@ -27,7 +27,7 @@ static LogicalResult buildDecomposeTuple(OpBuilder &builder, Location loc,
Type elementType = resultType.getType(i);
Value element = builder.create<test::GetTupleElementOp>(
loc, elementType, value, builder.getI32IntegerAttr(i));
- if (auto nestedTupleType = elementType.dyn_cast<TupleType>()) {
+ if (auto nestedTupleType = dyn_cast<TupleType>(elementType)) {
// Recurse if the current element is also a tuple.
if (failed(buildDecomposeTuple(builder, loc, nestedTupleType, element,
values)))
@@ -50,7 +50,7 @@ static std::optional<Value> buildMakeTupleOp(OpBuilder &builder,
elements.reserve(resultType.getTypes().size());
ValueRange::iterator inputIt = inputs.begin();
for (Type elementType : resultType.getTypes()) {
- if (auto nestedTupleType = elementType.dyn_cast<TupleType>()) {
+ if (auto nestedTupleType = dyn_cast<TupleType>(elementType)) {
// Determine how many input values are needed for the nested elements of
// the nested TupleType and advance inputIt by that number.
// TODO: We only need the *number* of nested types, not the types itself.
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
index 50504988689b..2231e427007a 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -38,9 +38,9 @@ static LogicalResult fuseLinalgOpsGreedily(func::FuncOp f) {
bool changed = false;
for (LinalgOp linalgOp : llvm::reverse(linalgOps)) {
for (OpOperand &opOperand : linalgOp->getOpOperands()) {
- if (opOperand.get().getType().isa<MemRefType>())
+ if (isa<MemRefType>(opOperand.get().getType()))
continue;
- if (opOperand.get().getType().isa<RankedTensorType>()) {
+ if (isa<RankedTensorType>(opOperand.get().getType())) {
// Tile and Fuse tensor input.
if (opOperand.getOperandNumber() >= linalgOp.getNumDpsInputs())
continue;
diff --git a/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp b/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
index 449a3e92b7da..0f0875874c49 100644
--- a/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
+++ b/mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
@@ -61,9 +61,9 @@ void ReportShapeFnPass::runOnOperation() {
if (attr) {
auto lookup = [&](Attribute attr) {
return cast<shape::FunctionLibraryOp>(
- SymbolTable::lookupSymbolIn(module, attr.cast<SymbolRefAttr>()));
+ SymbolTable::lookupSymbolIn(module, cast<SymbolRefAttr>(attr)));
};
- if (auto arrayAttr = attr.dyn_cast<ArrayAttr>()) {
+ if (auto arrayAttr = dyn_cast<ArrayAttr>(attr)) {
libraries.reserve(arrayAttr.size());
for (auto attr : arrayAttr)
libraries.push_back(lookup(attr));
diff --git a/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp b/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
index 6dc8b4a27d29..46fe86524797 100644
--- a/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
@@ -113,7 +113,7 @@ static void applyFoldConstantExtractSlicePatterns(Operation *rootOp) {
if (!op.getSource().hasOneUse())
return false;
- auto resultType = op.getResult().getType().cast<ShapedType>();
+ auto resultType = cast<ShapedType>(op.getResult().getType());
constexpr int64_t kConstantFoldingMaxNumElements = 1024;
return resultType.getNumElements() <= kConstantFoldingMaxNumElements;
};
diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
index 4660d9abe676..715c77b9a396 100644
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -49,7 +49,7 @@ Attribute MyPropStruct::asAttribute(MLIRContext *ctx) const {
}
LogicalResult MyPropStruct::setFromAttr(MyPropStruct &prop, Attribute attr,
InFlightDiagnostic *diag) {
- StringAttr strAttr = attr.dyn_cast<StringAttr>();
+ StringAttr strAttr = dyn_cast<StringAttr>(attr);
if (!strAttr) {
if (diag)
*diag << "Expect StringAttr but got " << attr;
@@ -221,7 +221,7 @@ struct TestOpAsmInterface : public OpAsmDialectInterface {
//===------------------------------------------------------------------===//
AliasResult getAlias(Attribute attr, raw_ostream &os) const final {
- StringAttr strAttr = attr.dyn_cast<StringAttr>();
+ StringAttr strAttr = dyn_cast<StringAttr>(attr);
if (!strAttr)
return AliasResult::NoAlias;
@@ -246,16 +246,16 @@ struct TestOpAsmInterface : public OpAsmDialectInterface {
}
AliasResult getAlias(Type type, raw_ostream &os) const final {
- if (auto tupleType = type.dyn_cast<TupleType>()) {
+ if (auto tupleType = dyn_cast<TupleType>(type)) {
if (tupleType.size() > 0 &&
llvm::all_of(tupleType.getTypes(), [](Type elemType) {
- return elemType.isa<SimpleAType>();
+ return isa<SimpleAType>(elemType);
})) {
os << "test_tuple";
return AliasResult::FinalAlias;
}
}
- if (auto intType = type.dyn_cast<TestIntegerType>()) {
+ if (auto intType = dyn_cast<TestIntegerType>(type)) {
if (intType.getSignedness() ==
TestIntegerType::SignednessSemantics::Unsigned &&
intType.getWidth() == 8) {
@@ -263,7 +263,7 @@ struct TestOpAsmInterface : public OpAsmDialectInterface {
return AliasResult::FinalAlias;
}
}
- if (auto recType = type.dyn_cast<TestRecursiveType>()) {
+ if (auto recType = dyn_cast<TestRecursiveType>(type)) {
if (recType.getName() == "type_to_alias") {
// We only make alias for a specific recursive type.
os << "testrec";
@@ -1230,7 +1230,7 @@ void PolyForOp::getAsmBlockArgumentNames(Region &region,
auto args = getRegion().front().getArguments();
auto e = std::min(arrayAttr.size(), args.size());
for (unsigned i = 0; i < e; ++i) {
- if (auto strAttr = arrayAttr[i].dyn_cast<StringAttr>())
+ if (auto strAttr = dyn_cast<StringAttr>(arrayAttr[i]))
setNameFn(args[i], strAttr.getValue());
}
}
@@ -1252,7 +1252,7 @@ static ParseResult parseOptionalLoc(OpAsmParser &p, Attribute &loc) {
}
static void printOptionalLoc(OpAsmPrinter &p, Operation *op, Attribute loc) {
- p.printOptionalLocationSpecifier(loc.cast<LocationAttr>());
+ p.printOptionalLocationSpecifier(cast<LocationAttr>(loc));
}
//===----------------------------------------------------------------------===//
@@ -1376,7 +1376,7 @@ LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents(
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
// Create return type consisting of the last element of the first operand.
auto operandType = operands.front().getType();
- auto sval = operandType.dyn_cast<ShapedType>();
+ auto sval = dyn_cast<ShapedType>(operandType);
if (!sval) {
return emitOptionalError(location, "only shaped type operands allowed");
}
@@ -1384,7 +1384,7 @@ LogicalResult OpWithShapedTypeInferTypeInterfaceOp::inferReturnTypeComponents(
auto type = IntegerType::get(context, 17);
Attribute encoding;
- if (auto rankedTy = sval.dyn_cast<RankedTensorType>())
+ if (auto rankedTy = dyn_cast<RankedTensorType>(sval))
encoding = rankedTy.getEncoding();
inferredReturnShapes.push_back(ShapedTypeComponents({dim}, type, encoding));
return success();
@@ -1404,7 +1404,7 @@ LogicalResult OpWithResultShapeInterfaceOp::reifyReturnTypeShapes(
Location loc = getLoc();
shapes.reserve(operands.size());
for (Value operand : llvm::reverse(operands)) {
- auto rank = operand.getType().cast<RankedTensorType>().getRank();
+ auto rank = cast<RankedTensorType>(operand.getType()).getRank();
auto currShape = llvm::to_vector<4>(
llvm::map_range(llvm::seq<int64_t>(0, rank), [&](int64_t dim) -> Value {
return builder.createOrFold<tensor::DimOp>(loc, operand, dim);
@@ -1421,7 +1421,7 @@ LogicalResult OpWithResultShapePerDimInterfaceOp::reifyResultShapes(
Location loc = getLoc();
shapes.reserve(getNumOperands());
for (Value operand : llvm::reverse(getOperands())) {
- auto tensorType = operand.getType().cast<RankedTensorType>();
+ auto tensorType = cast<RankedTensorType>(operand.getType());
auto currShape = llvm::to_vector<4>(llvm::map_range(
llvm::seq<int64_t>(0, tensorType.getRank()),
[&](int64_t dim) -> OpFoldResult {
@@ -1471,12 +1471,12 @@ void SideEffectOp::getEffects(
// If there is one, it is an array of dictionary attributes that hold
// information on the effects of this operation.
for (Attribute element : effectsAttr) {
- DictionaryAttr effectElement = element.cast<DictionaryAttr>();
+ DictionaryAttr effectElement = cast<DictionaryAttr>(element);
// Get the specific memory effect.
MemoryEffects::Effect *effect =
StringSwitch<MemoryEffects::Effect *>(
- effectElement.get("effect").cast<StringAttr>().getValue())
+ cast<StringAttr>(effectElement.get("effect")).getValue())
.Case("allocate", MemoryEffects::Allocate::get())
.Case("free", MemoryEffects::Free::get())
.Case("read", MemoryEffects::Read::get())
@@ -1491,7 +1491,7 @@ void SideEffectOp::getEffects(
if (effectElement.get("on_result"))
effects.emplace_back(effect, getResult(), resource);
else if (Attribute ref = effectElement.get("on_reference"))
- effects.emplace_back(effect, ref.cast<SymbolRefAttr>(), resource);
+ effects.emplace_back(effect, cast<SymbolRefAttr>(ref), resource);
else
effects.emplace_back(effect, resource);
}
@@ -1556,7 +1556,7 @@ void StringAttrPrettyNameOp::print(OpAsmPrinter &p) {
llvm::raw_svector_ostream tmpStream(resultNameStr);
p.printOperand(getResult(i), tmpStream);
- auto expectedName = getNames()[i].dyn_cast<StringAttr>();
+ auto expectedName = dyn_cast<StringAttr>(getNames()[i]);
if (!expectedName ||
tmpStream.str().drop_front() != expectedName.getValue()) {
namesDisagree = true;
@@ -1576,7 +1576,7 @@ void StringAttrPrettyNameOp::getAsmResultNames(
auto value = getNames();
for (size_t i = 0, e = value.size(); i != e; ++i)
- if (auto str = value[i].dyn_cast<StringAttr>())
+ if (auto str = dyn_cast<StringAttr>(value[i]))
if (!str.getValue().empty())
setNameFn(getResult(i), str.getValue());
}
@@ -1585,7 +1585,7 @@ void CustomResultsNameOp::getAsmResultNames(
function_ref<void(Value, StringRef)> setNameFn) {
ArrayAttr value = getNames();
for (size_t i = 0, e = value.size(); i != e; ++i)
- if (auto str = value[i].dyn_cast<StringAttr>())
+ if (auto str = dyn_cast<StringAttr>(value[i]))
if (!str.getValue().empty())
setNameFn(getResult(i), str.getValue());
}
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index adaa6e155899..a61ba8e47e3e 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -153,7 +153,7 @@ struct IncrementIntAttribute : public OpRewritePattern<AnyAttrOfOp> {
LogicalResult matchAndRewrite(AnyAttrOfOp op,
PatternRewriter &rewriter) const override {
- auto intAttr = op.getAttr().dyn_cast<IntegerAttr>();
+ auto intAttr = dyn_cast<IntegerAttr>(op.getAttr());
if (!intAttr)
return failure();
int64_t val = intAttr.getInt();
@@ -1271,11 +1271,11 @@ struct TestTypeConversionProducer
Type convertedType = getTypeConverter()
? getTypeConverter()->convertType(resultType)
: resultType;
- if (resultType.isa<FloatType>())
+ if (isa<FloatType>(resultType))
resultType = rewriter.getF64Type();
else if (resultType.isInteger(16))
resultType = rewriter.getIntegerType(64);
- else if (resultType.isa<test::TestRecursiveType>() &&
+ else if (isa<test::TestRecursiveType>(resultType) &&
convertedType != resultType)
resultType = convertedType;
else
@@ -1430,8 +1430,8 @@ struct TestTypeConversionDriver
inputs.empty())
return builder.create<TestTypeProducerOp>(loc, resultType);
// Allow producing an i64 from an integer.
- if (resultType.isa<IntegerType>() && inputs.size() == 1 &&
- inputs[0].getType().isa<IntegerType>())
+ if (isa<IntegerType>(resultType) && inputs.size() == 1 &&
+ isa<IntegerType>(inputs[0].getType()))
return builder.create<TestCastOp>(loc, resultType, inputs).getResult();
// Otherwise, fail.
return nullptr;
@@ -1440,7 +1440,7 @@ struct TestTypeConversionDriver
// Initialize the conversion target.
mlir::ConversionTarget target(getContext());
target.addDynamicallyLegalOp<TestTypeProducerOp>([](TestTypeProducerOp op) {
- auto recursiveType = op.getType().dyn_cast<test::TestRecursiveType>();
+ auto recursiveType = dyn_cast<test::TestRecursiveType>(op.getType());
return op.getType().isF64() || op.getType().isInteger(64) ||
(recursiveType &&
recursiveType.getName() == "outer_converted_type");
diff --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
index c147ff471d28..9642301e8111 100644
--- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
+++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
@@ -42,20 +42,20 @@ ConvertTosaNegateOp::matchAndRewrite(Operation *op,
auto tosaNegateOp = cast<tosa::NegateOp>(op);
auto inputType =
- tosaNegateOp.getInput1().getType().dyn_cast<mlir::RankedTensorType>();
+ dyn_cast<mlir::RankedTensorType>(tosaNegateOp.getInput1().getType());
// skip if input is not ranked tensor type
if (!inputType)
return failure();
// skip if it's not ranked tensor type.
auto outputType =
- tosaNegateOp.getResult().getType().dyn_cast<mlir::RankedTensorType>();
+ dyn_cast<mlir::RankedTensorType>(tosaNegateOp.getResult().getType());
if (!outputType)
return failure();
// skip if output is not per-tensor quantized type.
auto outputElementType =
- outputType.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
+ dyn_cast<mlir::quant::UniformQuantizedType>(outputType.getElementType());
if (!outputElementType)
return failure();
@@ -112,14 +112,14 @@ ConvertTosaConv2DOp::matchAndRewrite(Operation *op,
auto tosaConv2DOp = cast<tosa::Conv2DOp>(op);
auto inputType =
- tosaConv2DOp.getInput().getType().dyn_cast<mlir::RankedTensorType>();
+ dyn_cast<mlir::RankedTensorType>(tosaConv2DOp.getInput().getType());
// skip if input is not ranked tensor type
if (!inputType)
return failure();
auto weightType =
- tosaConv2DOp.getWeight().getType().dyn_cast<mlir::RankedTensorType>();
+ dyn_cast<mlir::RankedTensorType>(tosaConv2DOp.getWeight().getType());
// skip if wt is not ranked tensor type
if (!weightType)
@@ -127,16 +127,16 @@ ConvertTosaConv2DOp::matchAndRewrite(Operation *op,
// skip if it's not ranked tensor type.
auto outputType =
- tosaConv2DOp.getResult().getType().dyn_cast<mlir::RankedTensorType>();
+ dyn_cast<mlir::RankedTensorType>(tosaConv2DOp.getResult().getType());
if (!outputType)
return failure();
auto inputQType =
- inputType.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
+ dyn_cast<mlir::quant::UniformQuantizedType>(inputType.getElementType());
auto weightQType =
- weightType.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
+ dyn_cast<mlir::quant::UniformQuantizedType>(weightType.getElementType());
auto outputQType =
- outputType.getElementType().dyn_cast<mlir::quant::UniformQuantizedType>();
+ dyn_cast<mlir::quant::UniformQuantizedType>(outputType.getElementType());
// Works on quantized type only.
if (!(inputQType && weightQType && outputQType))
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index dd853aa1dc3c..d0c79ab98915 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -89,7 +89,7 @@ private:
auto extract = dyn_cast<ExtractStridedSliceOp>(users);
if (!extract)
return std::nullopt;
- auto vecType = extract.getResult().getType().cast<VectorType>();
+ auto vecType = cast<VectorType>(extract.getResult().getType());
if (dstVec && dstVec != vecType)
return std::nullopt;
dstVec = vecType;
@@ -430,7 +430,7 @@ static Value allocateGlobalSharedMemory(Location loc, OpBuilder &builder,
static constexpr int64_t kSharedMemorySpace = 3;
// Compute type of shared memory buffer.
MemRefType memrefType;
- if (auto vectorType = type.dyn_cast<VectorType>()) {
+ if (auto vectorType = dyn_cast<VectorType>(type)) {
memrefType =
MemRefType::get(vectorType.getShape(), vectorType.getElementType(), {},
kSharedMemorySpace);
@@ -535,7 +535,7 @@ struct TestVectorDistribution
// Create a map (d0, d1) -> (d1) to distribute along the inner
// dimension. Once we support n-d distribution we can add more
// complex cases.
- VectorType vecType = val.getType().dyn_cast<VectorType>();
+ VectorType vecType = dyn_cast<VectorType>(val.getType());
int64_t vecRank = vecType ? vecType.getRank() : 0;
OpBuilder builder(val.getContext());
if (vecRank == 0)
@@ -642,9 +642,9 @@ struct TestCreateVectorBroadcast
if (op->getName().getStringRef() != "test_create_broadcast")
return;
auto targetShape =
- op->getResult(0).getType().cast<VectorType>().getShape();
+ cast<VectorType>(op->getResult(0).getType()).getShape();
auto arrayAttr =
- op->getAttr("broadcast_dims").cast<DenseI64ArrayAttr>().asArrayRef();
+ cast<DenseI64ArrayAttr>(op->getAttr("broadcast_dims")).asArrayRef();
llvm::SetVector<int64_t> broadcastedDims;
broadcastedDims.insert(arrayAttr.begin(), arrayAttr.end());
OpBuilder b(op);
diff --git a/mlir/test/lib/IR/TestBuiltinAttributeInterfaces.cpp b/mlir/test/lib/IR/TestBuiltinAttributeInterfaces.cpp
index 9313f403ce1c..498de3d87bd4 100644
--- a/mlir/test/lib/IR/TestBuiltinAttributeInterfaces.cpp
+++ b/mlir/test/lib/IR/TestBuiltinAttributeInterfaces.cpp
@@ -34,7 +34,7 @@ struct TestElementsAttrInterface
void runOnOperation() override {
getOperation().walk([&](Operation *op) {
for (NamedAttribute attr : op->getAttrs()) {
- auto elementsAttr = attr.getValue().dyn_cast<ElementsAttr>();
+ auto elementsAttr = dyn_cast<ElementsAttr>(attr.getValue());
if (!elementsAttr)
continue;
testElementsAttrIteration<int64_t>(op, elementsAttr, "int64_t");
diff --git a/mlir/test/lib/IR/TestDiagnostics.cpp b/mlir/test/lib/IR/TestDiagnostics.cpp
index 1f5b29d00de7..578486c0a3b1 100644
--- a/mlir/test/lib/IR/TestDiagnostics.cpp
+++ b/mlir/test/lib/IR/TestDiagnostics.cpp
@@ -36,7 +36,7 @@ struct TestDiagnosticFilterPass
// Build a diagnostic handler that has filtering capabilities.
auto filterFn = [&](Location loc) {
// Ignore non-file locations.
- FileLineColLoc fileLoc = loc.dyn_cast<FileLineColLoc>();
+ FileLineColLoc fileLoc = dyn_cast<FileLineColLoc>(loc);
if (!fileLoc)
return true;
diff --git a/mlir/test/lib/IR/TestFunc.cpp b/mlir/test/lib/IR/TestFunc.cpp
index 171d46abfde6..45897882b003 100644
--- a/mlir/test/lib/IR/TestFunc.cpp
+++ b/mlir/test/lib/IR/TestFunc.cpp
@@ -35,13 +35,13 @@ struct TestFuncInsertArg
SmallVector<Location, 4> locsToInsert;
for (auto insert : inserts.getAsRange<ArrayAttr>()) {
indicesToInsert.push_back(
- insert[0].cast<IntegerAttr>().getValue().getZExtValue());
- typesToInsert.push_back(insert[1].cast<TypeAttr>().getValue());
+ cast<IntegerAttr>(insert[0]).getValue().getZExtValue());
+ typesToInsert.push_back(cast<TypeAttr>(insert[1]).getValue());
attrsToInsert.push_back(insert.size() > 2
- ? insert[2].cast<DictionaryAttr>()
+ ? cast<DictionaryAttr>(insert[2])
: DictionaryAttr::get(&getContext()));
locsToInsert.push_back(insert.size() > 3
- ? Location(insert[3].cast<LocationAttr>())
+ ? Location(cast<LocationAttr>(insert[3]))
: unknownLoc);
}
func->removeAttr("test.insert_args");
@@ -72,10 +72,10 @@ struct TestFuncInsertResult
SmallVector<DictionaryAttr, 4> attrsToInsert;
for (auto insert : inserts.getAsRange<ArrayAttr>()) {
indicesToInsert.push_back(
- insert[0].cast<IntegerAttr>().getValue().getZExtValue());
- typesToInsert.push_back(insert[1].cast<TypeAttr>().getValue());
+ cast<IntegerAttr>(insert[0]).getValue().getZExtValue());
+ typesToInsert.push_back(cast<TypeAttr>(insert[1]).getValue());
attrsToInsert.push_back(insert.size() > 2
- ? insert[2].cast<DictionaryAttr>()
+ ? cast<DictionaryAttr>(insert[2])
: DictionaryAttr::get(&getContext()));
}
func->removeAttr("test.insert_results");
diff --git a/mlir/test/lib/IR/TestInterfaces.cpp b/mlir/test/lib/IR/TestInterfaces.cpp
index 633d5304bc9b..2dd3fe245e22 100644
--- a/mlir/test/lib/IR/TestInterfaces.cpp
+++ b/mlir/test/lib/IR/TestInterfaces.cpp
@@ -27,7 +27,7 @@ struct TestTypeInterfaces
void runOnOperation() override {
getOperation().walk([](Operation *op) {
for (Type type : op->getResultTypes()) {
- if (auto testInterface = type.dyn_cast<TestTypeInterface>()) {
+ if (auto testInterface = dyn_cast<TestTypeInterface>(type)) {
testInterface.printTypeA(op->getLoc());
testInterface.printTypeB(op->getLoc());
testInterface.printTypeC(op->getLoc());
@@ -37,7 +37,7 @@ struct TestTypeInterfaces
TestTypeInterface result = testInterface.printTypeRet(op->getLoc());
(void)result;
}
- if (auto testType = type.dyn_cast<TestType>())
+ if (auto testType = dyn_cast<TestType>(type))
testType.printTypeE(op->getLoc());
}
});
diff --git a/mlir/test/lib/IR/TestOpaqueLoc.cpp b/mlir/test/lib/IR/TestOpaqueLoc.cpp
index 977d2b001a18..c0ce8965868a 100644
--- a/mlir/test/lib/IR/TestOpaqueLoc.cpp
+++ b/mlir/test/lib/IR/TestOpaqueLoc.cpp
@@ -74,7 +74,7 @@ struct TestOpaqueLoc
ScopedDiagnosticHandler diagHandler(&getContext(), [](Diagnostic &diag) {
auto &os = llvm::outs();
- if (diag.getLocation().isa<OpaqueLoc>()) {
+ if (isa<OpaqueLoc>(diag.getLocation())) {
MyLocation *loc = OpaqueLoc::getUnderlyingLocationOrNull<MyLocation *>(
diag.getLocation());
if (loc)
diff --git a/mlir/test/lib/IR/TestPrintDefUse.cpp b/mlir/test/lib/IR/TestPrintDefUse.cpp
index 0656036731a1..5d489a342f57 100644
--- a/mlir/test/lib/IR/TestPrintDefUse.cpp
+++ b/mlir/test/lib/IR/TestPrintDefUse.cpp
@@ -34,7 +34,7 @@ struct TestPrintDefUsePass
} else {
// If there is no defining op, the Value is necessarily a Block
// argument.
- auto blockArg = operand.cast<BlockArgument>();
+ auto blockArg = cast<BlockArgument>(operand);
llvm::outs() << " - Operand produced by Block argument, number "
<< blockArg.getArgNumber() << "\n";
}
diff --git a/mlir/test/lib/Transforms/TestTopologicalSort.cpp b/mlir/test/lib/Transforms/TestTopologicalSort.cpp
index 4ad5b5c2608f..a8cc7a5af60d 100644
--- a/mlir/test/lib/Transforms/TestTopologicalSort.cpp
+++ b/mlir/test/lib/Transforms/TestTopologicalSort.cpp
@@ -42,7 +42,7 @@ struct TestTopologicalSortAnalysisPass
// If the root has an "ordered" attribute, we fill the selectedOps
// vector in a certain order.
int64_t pos =
- selected->getAttr("selected").cast<IntegerAttr>().getInt();
+ cast<IntegerAttr>(selected->getAttr("selected")).getInt();
if (pos >= static_cast<int64_t>(selectedOps.size()))
selectedOps.append(pos + 1 - selectedOps.size(), nullptr);
selectedOps[pos] = selected;