author:    wren romano <2998727+wrengr@users.noreply.github.com>  2023-05-17 13:09:53 -0700
committer: wren romano <2998727+wrengr@users.noreply.github.com>  2023-05-17 14:24:09 -0700
commit:    a0615d020a02e252196383439e2c8143c6525e05 (patch)
tree:      aa308ef0e4c62d7dba3450f0eb4f8f1dffc0f57c /mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
parent:    4dc205f016e3dd2eb1182886a77676f24e39e329 (diff)
download:  llvm-a0615d020a02e252196383439e2c8143c6525e05.tar.gz
[mlir][sparse] Renaming the STEA field `dimLevelType` to `lvlTypes`
This commit is part of the migration towards the new STEA syntax/design. In particular, it includes the following changes:

* Renaming compiler-internal functions/methods:
  * `SparseTensorEncodingAttr::{getDimLevelType => getLvlTypes}`
  * `Merger::{getDimLevelType => getLvlType}` (for consistency)
  * `sparse_tensor::{getDimLevelType => buildLevelType}` (to help reduce confusion vs. actual getter methods)
* Renaming external facets to match:
  * the STEA parser and printer
  * the C and Python bindings
  * PyTACO

However, the actual renaming of `DimLevelType` itself (along with all the "dlt" names) will be handled in a separate commit.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D150330
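For illustration only (the `#CSR_before`/`#CSR_after` names are hypothetical and not part of this commit), the sketch below shows the surface-syntax effect of the field rename; the actual test updates follow in the diff.

// Minimal sketch of the STEA field rename, assuming the same CSR level types
// used by this test.
#CSR_before = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ]   // old field name
}>
#CSR_after = #sparse_tensor.encoding<{
  lvlTypes = [ "dense", "compressed" ]       // new field name
}>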
Diffstat (limited to 'mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir')
-rw-r--r--  mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
index d51374b1fe3f..43b75f8aa2fe 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -11,30 +11,30 @@
// TODO: support slices on lib path
#CSR = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ]
+ lvlTypes = [ "dense", "compressed" ]
}>
#CSR_SLICE = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ],
+ lvlTypes = [ "dense", "compressed" ],
slice = [ (1, 4, 1), (1, 4, 2) ]
}>
#CSR_SLICE_DYN = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ],
+ lvlTypes = [ "dense", "compressed" ],
slice = [ (?, ?, ?), (?, ?, ?) ]
}>
#COO = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed-nu", "singleton" ]
}>
#COO_SLICE = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed-nu", "singleton" ],
slice = [ (1, 4, 1), (1, 4, 2) ]
}>
#COO_SLICE_DYN = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed-nu", "singleton" ],
slice = [ (?, ?, ?), (?, ?, ?) ]
}>