author    Aart Bik <ajcbik@google.com>  2021-09-24 13:15:17 -0700
committer Aart Bik <ajcbik@google.com>  2021-09-27 11:22:05 -0700
commit    ec97a205c3cc5f1b407f18bb642c15bb8cfcb276
tree      5652b0a52ae5b9c6a64e5b7fb20416d6109a10ff
parent    ef0f728abe6ec43f2f75082c9b47ec7fade2ead2
[mlir][sparse] preserve zero-initialization for materializing buffers
This revision makes sure that when the output buffer materializes locally
(in contrast with the passing in of output tensors, either in-place or not
in-place), the zero-initialization assumption is preserved. It also adds a
bit more documentation on our sparse kernel assumptions (viz. the TACO
assumptions).

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D110442
 mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp | 18
 mlir/test/Dialect/SparseTensor/sparse_1d.mlir               | 30
 2 files changed, 46 insertions(+), 2 deletions(-)
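To make the TACO-style assumption concrete, here is a minimal sketch (not part
of this change; #SV, #trait, and the function names are illustrative, written
in the style of the sparse_1d.mlir tests below) of the two kernel forms the
documentation added in this patch refers to:

#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

#trait = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a
    affine_map<(i) -> (i)>,  // b
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"]
}

// Overwrite kernel x(i) = a(i) * b(i): only nonzero products are ever
// written, so the lowering relies on %u starting out as all zeroes.
func @mul_overwrite(%a: tensor<32xf32, #SV>, %b: tensor<32xf32>) -> tensor<32xf32> {
  %u = linalg.init_tensor [32] : tensor<32xf32>
  %0 = linalg.generic #trait
      ins(%a, %b: tensor<32xf32, #SV>, tensor<32xf32>)
      outs(%u: tensor<32xf32>) {
    ^bb(%va: f32, %vb: f32, %vx: f32):
      %p = mulf %va, %vb : f32
      linalg.yield %p : f32
  } -> tensor<32xf32>
  return %0 : tensor<32xf32>
}

// Update kernel x(i) += a(i) * b(i): the body reads %vx, so the original
// contents of %x are used and no zero-initialization is assumed.
func @mul_update(%a: tensor<32xf32, #SV>, %b: tensor<32xf32>,
                 %x: tensor<32xf32>) -> tensor<32xf32> {
  %0 = linalg.generic #trait
      ins(%a, %b: tensor<32xf32, #SV>, tensor<32xf32>)
      outs(%x: tensor<32xf32>) {
    ^bb(%va: f32, %vb: f32, %vx: f32):
      %p = mulf %va, %vb : f32
      %s = addf %vx, %p : f32
      linalg.yield %s : f32
  } -> tensor<32xf32>
  return %0 : tensor<32xf32>
}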
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 28fb3a69c3be..60272e240422 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -389,7 +389,12 @@ static bool getInPlace(Value val) {
return false;
}
-/// Generates buffer for the output tensor.
+/// Generates buffer for the output tensor. Note that all sparse kernels
+/// assume that when all elements are written to (viz. x(i) = y(i) * z(i)),
+/// the output buffer is already initialized to all zeroes and only nonzero
+/// values are computed and written out. For updates (viz. x(i) += y(i) * z(i)),
+/// only nonzero values are used for the updates and no assumption on the
+/// original contents of the output buffer is necessary.
static Value genOutputBuffer(CodeGen &codegen, PatternRewriter &rewriter,
linalg::GenericOp op, MemRefType denseTp,
ArrayRef<Value> args) {
@@ -404,7 +409,16 @@ static Value genOutputBuffer(CodeGen &codegen, PatternRewriter &rewriter,
// By default, a new buffer is allocated which is initialized to the
// tensor defined in the outs() clause. This is always correct but
// introduces a dense initialization component that may negatively
- // impact the running complexity of the sparse kernel.
+ // impact the running complexity of the sparse kernel. If the tensor
+ // materializes within this method, we need to preserve the zero
+ // initialization assumption of all sparse output buffers.
+ if (auto init = tensor.getDefiningOp<linalg::InitTensorOp>()) {
+ Type tp = denseTp.getElementType();
+ Value alloc = rewriter.create<memref::AllocOp>(loc, denseTp, args);
+ Value zero = rewriter.create<ConstantOp>(loc, tp, rewriter.getZeroAttr(tp));
+ rewriter.create<linalg::FillOp>(loc, zero, alloc);
+ return alloc;
+ }
Value init = rewriter.create<memref::BufferCastOp>(loc, denseTp, tensor);
Value alloc = rewriter.create<memref::AllocOp>(loc, denseTp, args);
rewriter.create<memref::CopyOp>(loc, init, alloc);
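The two code paths above set up the output buffer differently. As a rough
sketch of the buffer setup each path emits (assumed IR, not taken from this
patch's tests; the function names and %argx are illustrative):

// Default path: the outs() tensor is passed in, so its contents are copied
// into the newly allocated buffer before the kernel runs.
func @default_path(%argx: tensor<32xf32>) -> memref<32xf32> {
  %init = memref.buffer_cast %argx : memref<32xf32>
  %alloc = memref.alloc() : memref<32xf32>
  memref.copy %init, %alloc : memref<32xf32> to memref<32xf32>
  return %alloc : memref<32xf32>
}

// Materializing path: the outs() tensor stems from linalg.init_tensor, so the
// fresh buffer is zero-filled instead, preserving the all-zeroes assumption.
func @materializing_path() -> memref<32xf32> {
  %buf = memref.alloc() : memref<32xf32>
  %zero = constant 0.000000e+00 : f32
  linalg.fill(%zero, %buf) : f32, memref<32xf32>
  return %buf : memref<32xf32>
}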
diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
index 47ee7005881d..ce436879cea0 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
@@ -43,6 +43,36 @@ func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> te
return %0 : tensor<32xf32>
}
+// CHECK-LABEL: func @add_d_init(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>>,
+// CHECK-SAME: %[[VAL_1:.*]]: f32) -> tensor<32xf32> {
+// CHECK: %[[VAL_2:.*]] = constant 32 : index
+// CHECK: %[[VAL_3:.*]] = constant 0.000000e+00 : f32
+// CHECK: %[[VAL_4:.*]] = constant 0 : index
+// CHECK: %[[VAL_5:.*]] = constant 1 : index
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = memref.alloc() : memref<32xf32>
+// CHECK: linalg.fill(%[[VAL_3]], %[[VAL_7]]) : f32, memref<32xf32>
+// CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] {
+// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_8]]] : memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = addf %[[VAL_9]], %[[VAL_1]] : f32
+// CHECK: memref.store %[[VAL_10]], %[[VAL_7]]{{\[}}%[[VAL_8]]] : memref<32xf32>
+// CHECK: }
+// CHECK: %[[VAL_11:.*]] = memref.tensor_load %[[VAL_7]] : memref<32xf32>
+// CHECK: return %[[VAL_11]] : tensor<32xf32>
+// CHECK: }
+func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> {
+ %u = linalg.init_tensor [32] : tensor<32xf32>
+ %0 = linalg.generic #trait1
+ ins(%arga: tensor<32xf32, #DV>)
+ outs(%u: tensor<32xf32>) {
+ ^bb(%a: f32, %x: f32):
+ %0 = addf %a, %argb : f32
+ linalg.yield %0 : f32
+ } -> tensor<32xf32>
+ return %0 : tensor<32xf32>
+}
+
// CHECK-LABEL: func @mul_d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ], pointerBitWidth = 0, indexBitWidth = 0 }>>,
// CHECK-SAME: %[[VAL_1:.*]]: f32,