author     River Riddle <riddleriver@gmail.com>     2022-02-02 10:05:01 -0800
committer  River Riddle <riddleriver@gmail.com>     2022-02-02 13:34:28 -0800
commit     4e190c58de7b52a0d6b2052a96c6f5933cb50b47 (patch)
tree       94dc63b66e68962a3073ca860ca6e6c40583b75a /mlir/examples/toy/Ch5
parent     42e5f1d97b3ecf6f967a0e63ca39f05d3262e2b2 (diff)
[mlir][NFC] Update Toy operations to use `hasVerifier` instead of `verifier`
The `verifier` field is deprecated and slated for removal.
Differential Revision: https://reviews.llvm.org/D118816
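
For readers following the Toy tutorial, the pattern this patch migrates to looks roughly as follows. This is a minimal sketch, not part of the patch: the op name `MyOp`, its signature, and the check inside `verify()` are hypothetical placeholders. With `let hasVerifier = 1;`, ODS declares a `verify()` member function on the generated op class, and the body is written by hand as a C++ member function instead of a free `static verify(Op)` helper, so generated accessors are called directly rather than through an `op` parameter.

    // Ops.td: request a hand-written verifier for a hypothetical op.
    def MyOp : Toy_Op<"my_op"> {
      let arguments = (ins F64Tensor:$input);
      let results = (outs F64Tensor);

      // Indicate that additional verification for this operation is necessary;
      // ODS then declares `mlir::LogicalResult MyOp::verify()` for us to define.
      let hasVerifier = 1;
    }

    // Dialect.cpp: define the declared member. The specific check below is
    // purely illustrative.
    mlir::LogicalResult MyOp::verify() {
      // The verifier runs on the op itself, so use the accessors directly.
      auto inputType = getOperand().getType().dyn_cast<mlir::RankedTensorType>();
      auto resultType = getType().dyn_cast<mlir::RankedTensorType>();
      if (!inputType || !resultType)
        return mlir::success();
      if (inputType.getShape() != resultType.getShape())
        return emitOpError("expected result shape to match the input shape");
      return mlir::success();
    }

The practical effect, visible throughout the diff below, is mechanical: `op.foo()` becomes `foo()`, and `op.emitOpError(...)` becomes `emitOpError(...)`.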
Diffstat (limited to 'mlir/examples/toy/Ch5')
-rw-r--r--  mlir/examples/toy/Ch5/include/toy/Ops.td | 12
-rw-r--r--  mlir/examples/toy/Ch5/mlir/Dialect.cpp   | 52
2 files changed, 31 insertions, 33 deletions
diff --git a/mlir/examples/toy/Ch5/include/toy/Ops.td b/mlir/examples/toy/Ch5/include/toy/Ops.td
index ea7689cf8acc..22f96abeb02c 100644
--- a/mlir/examples/toy/Ch5/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch5/include/toy/Ops.td
@@ -78,8 +78,8 @@ def ConstantOp : Toy_Op<"constant", [NoSideEffect]> {
     OpBuilder<(ins "double":$value)>
   ];
 
-  // Invoke a static verify method to verify this constant operation.
-  let verifier = [{ return ::verify(*this); }];
+  // Indicate that additional verification for this operation is necessary.
+  let hasVerifier = 1;
 }
 
 def AddOp : Toy_Op<"add",
@@ -253,8 +253,8 @@ def ReturnOp : Toy_Op<"return", [NoSideEffect, HasParent<"FuncOp">,
     bool hasOperand() { return getNumOperands() != 0; }
   }];
 
-  // Invoke a static verify method to verify this return operation.
-  let verifier = [{ return ::verify(*this); }];
+  // Indicate that additional verification for this operation is necessary.
+  let hasVerifier = 1;
 }
 
 def TransposeOp : Toy_Op<"transpose",
@@ -276,8 +276,8 @@ def TransposeOp : Toy_Op<"transpose",
     OpBuilder<(ins "Value":$input)>
   ];
 
-  // Invoke a static verify method to verify this transpose operation.
-  let verifier = [{ return ::verify(*this); }];
+  // Indicate that additional verification for this operation is necessary.
+  let hasVerifier = 1;
 }
 
 #endif // TOY_OPS
diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
index f4ded4956c97..bf1d2a581d03 100644
--- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
@@ -182,29 +182,28 @@ static void print(mlir::OpAsmPrinter &printer, ConstantOp op) {
   printer << op.value();
 }
 
-/// Verifier for the constant operation. This corresponds to the `::verify(...)`
-/// in the op definition.
-static mlir::LogicalResult verify(ConstantOp op) {
+/// Verifier for the constant operation. This corresponds to the
+/// `let hasVerifier = 1` in the op definition.
+mlir::LogicalResult ConstantOp::verify() {
   // If the return type of the constant is not an unranked tensor, the shape
   // must match the shape of the attribute holding the data.
-  auto resultType = op.getResult().getType().dyn_cast<mlir::RankedTensorType>();
+  auto resultType = getResult().getType().dyn_cast<mlir::RankedTensorType>();
   if (!resultType)
     return success();
 
   // Check that the rank of the attribute type matches the rank of the constant
   // result type.
-  auto attrType = op.value().getType().cast<mlir::TensorType>();
+  auto attrType = value().getType().cast<mlir::TensorType>();
   if (attrType.getRank() != resultType.getRank()) {
-    return op.emitOpError(
-               "return type must match the one of the attached value "
-               "attribute: ")
+    return emitOpError("return type must match the one of the attached value "
+                       "attribute: ")
            << attrType.getRank() << " != " << resultType.getRank();
   }
 
   // Check that each of the dimensions match between the two types.
   for (int dim = 0, dimE = attrType.getRank(); dim < dimE; ++dim) {
     if (attrType.getShape()[dim] != resultType.getShape()[dim]) {
-      return op.emitOpError(
+      return emitOpError(
                  "return type shape mismatches its attribute at dimension ")
              << dim << ": " << attrType.getShape()[dim]
              << " != " << resultType.getShape()[dim];
@@ -286,28 +285,27 @@ void MulOp::inferShapes() { getResult().setType(getOperand(0).getType()); }
 //===----------------------------------------------------------------------===//
 // ReturnOp
 
-static mlir::LogicalResult verify(ReturnOp op) {
+mlir::LogicalResult ReturnOp::verify() {
   // We know that the parent operation is a function, because of the 'HasParent'
   // trait attached to the operation definition.
-  auto function = cast<FuncOp>(op->getParentOp());
+  auto function = cast<FuncOp>((*this)->getParentOp());
 
   /// ReturnOps can only have a single optional operand.
-  if (op.getNumOperands() > 1)
-    return op.emitOpError() << "expects at most 1 return operand";
+  if (getNumOperands() > 1)
+    return emitOpError() << "expects at most 1 return operand";
 
   // The operand number and types must match the function signature.
   const auto &results = function.getType().getResults();
-  if (op.getNumOperands() != results.size())
-    return op.emitOpError()
-           << "does not return the same number of values ("
-           << op.getNumOperands() << ") as the enclosing function ("
-           << results.size() << ")";
+  if (getNumOperands() != results.size())
+    return emitOpError() << "does not return the same number of values ("
+                         << getNumOperands() << ") as the enclosing function ("
+                         << results.size() << ")";
 
   // If the operation does not have an input, we are done.
-  if (!op.hasOperand())
+  if (!hasOperand())
     return mlir::success();
 
-  auto inputType = *op.operand_type_begin();
+  auto inputType = *operand_type_begin();
   auto resultType = results.front();
 
   // Check that the result type of the function matches the operand type.
@@ -315,9 +313,9 @@ static mlir::LogicalResult verify(ReturnOp op) {
       resultType.isa<mlir::UnrankedTensorType>())
     return mlir::success();
 
-  return op.emitError() << "type of return operand (" << inputType
-                        << ") doesn't match function result type ("
-                        << resultType << ")";
+  return emitError() << "type of return operand (" << inputType
+                     << ") doesn't match function result type (" << resultType
+                     << ")";
 }
 
 //===----------------------------------------------------------------------===//
@@ -335,16 +333,16 @@ void TransposeOp::inferShapes() {
   getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
 }
 
-static mlir::LogicalResult verify(TransposeOp op) {
-  auto inputType = op.getOperand().getType().dyn_cast<RankedTensorType>();
-  auto resultType = op.getType().dyn_cast<RankedTensorType>();
+mlir::LogicalResult TransposeOp::verify() {
+  auto inputType = getOperand().getType().dyn_cast<RankedTensorType>();
+  auto resultType = getType().dyn_cast<RankedTensorType>();
   if (!inputType || !resultType)
     return mlir::success();
 
   auto inputShape = inputType.getShape();
   if (!std::equal(inputShape.begin(), inputShape.end(),
                   resultType.getShape().rbegin())) {
-    return op.emitError()
+    return emitError()
            << "expected result shape to be a transpose of the input";
   }
   return mlir::success();