author    wren romano <2998727+wrengr@users.noreply.github.com>  2023-05-17 13:09:53 -0700
committer wren romano <2998727+wrengr@users.noreply.github.com>  2023-05-17 14:24:09 -0700
commit    a0615d020a02e252196383439e2c8143c6525e05 (patch)
tree      aa308ef0e4c62d7dba3450f0eb4f8f1dffc0f57c /mlir/test/Dialect/SparseTensor/codegen.mlir
parent    4dc205f016e3dd2eb1182886a77676f24e39e329 (diff)
[mlir][sparse] Renaming the STEA field `dimLevelType` to `lvlTypes`
This commit is part of the migration towards the new STEA syntax/design. In particular, this commit includes the following changes:

* Renaming compiler-internal functions/methods:
  * `SparseTensorEncodingAttr::{getDimLevelType => getLvlTypes}`
  * `Merger::{getDimLevelType => getLvlType}` (for consistency)
  * `sparse_tensor::{getDimLevelType => buildLevelType}` (to help reduce confusion vs actual getter methods)
* Renaming external facets to match:
  * the STEA parser and printer
  * the C and Python bindings
  * PyTACO

However, the actual renaming of the `DimLevelType` type itself (along with all the "dlt" names) will be handled in a separate commit.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D150330
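For reference, a minimal before/after sketch of the surface-syntax change, distilled from the diff below (the `#CSR` encoding is one example from the test file; all other encoding fields are unchanged by this rename):

    // Before: old STEA field name.
    #CSR = #sparse_tensor.encoding<{
      dimLevelType = [ "dense", "compressed" ]
    }>

    // After: the field is now spelled `lvlTypes`; the accepted
    // level-type strings ("dense", "compressed", "singleton", ...)
    // are the same as before.
    #CSR = #sparse_tensor.encoding<{
      lvlTypes = [ "dense", "compressed" ]
    }>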
Diffstat (limited to 'mlir/test/Dialect/SparseTensor/codegen.mlir')
-rw-r--r--  mlir/test/Dialect/SparseTensor/codegen.mlir  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 4a5421265737..243f3ae4513e 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -1,62 +1,62 @@
// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize -cse | FileCheck %s
-#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
+#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
#SparseVector = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed" ],
+ lvlTypes = [ "compressed" ],
crdWidth = 64,
posWidth = 32
}>
#Dense2D = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "dense" ],
+ lvlTypes = [ "dense", "dense" ],
crdWidth = 64,
posWidth = 32
}>
#Row = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "dense" ],
+ lvlTypes = [ "compressed", "dense" ],
crdWidth = 64,
posWidth = 32
}>
#CSR = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ],
+ lvlTypes = [ "dense", "compressed" ],
crdWidth = 64,
posWidth = 32
}>
#UCSR = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed-no" ]
+ lvlTypes = [ "dense", "compressed-no" ]
}>
#CSC = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ],
+ lvlTypes = [ "dense", "compressed" ],
dimOrdering = affine_map<(i, j) -> (j, i)>
}>
#DCSR = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "compressed" ],
+ lvlTypes = [ "compressed", "compressed" ],
crdWidth = 64,
posWidth = 32
}>
#Dense3D = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "dense", "dense" ],
+ lvlTypes = [ "dense", "dense", "dense" ],
dimOrdering = affine_map<(i, j, k) -> (k, i, j)>
}>
#Coo = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed-nu", "singleton" ]
}>
#CooPNo = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed-nu", "singleton-no" ],
+ lvlTypes = [ "compressed-nu", "singleton-no" ],
dimOrdering = affine_map<(i, j) -> (j, i)>
}>
#ccoo = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed", "compressed-nu", "singleton" ]
}>
// CHECK-LABEL: func @sparse_nop(
@@ -680,7 +680,7 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
}
// CHECK-LABEL: func.func @sparse_new_coo(
-// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>>) {
+// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>) {
// CHECK-DAG: %[[A1:.*]] = arith.constant false
// CHECK-DAG: %[[A2:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[A3:.*]] = arith.constant 0 : index
@@ -697,7 +697,7 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
// CHECK: %[[A13:.*]] = memref.cast %[[A12]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[A14:.*]] = memref.alloc(%[[A11]]) : memref<?xindex>
// CHECK: %[[A15:.*]] = memref.alloc(%[[A10]]) : memref<?xf32>
-// CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>>
+// CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>
// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A16]] lvl_sz at 0 with %[[A8]]
// CHECK: %[[A19:.*]] = sparse_tensor.storage_specifier.get %[[A18]] pos_mem_sz at 0
// CHECK: %[[A21:.*]], %[[A22:.*]] = sparse_tensor.push_back %[[A19]], %[[A13]], %[[A3]]
@@ -725,7 +725,7 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #Coo> {
}
// CHECK-LABEL: func.func @sparse_new_coo_permute_no(
-// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>>) {
+// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>) {
// CHECK-DAG: %[[A1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[A2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[A3:.*]] = arith.constant 2 : index
@@ -741,7 +741,7 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #Coo> {
// CHECK: %[[A12:.*]] = memref.cast %[[A11]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[A13:.*]] = memref.alloc(%[[A10]]) : memref<?xindex>
// CHECK: %[[A14:.*]] = memref.alloc(%[[A9]]) : memref<?xf32>
-// CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>>
+// CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>
// CHECK: %[[A17:.*]] = sparse_tensor.storage_specifier.set %[[A15]] lvl_sz at 0 with %[[A8]]
// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.get %[[A17]] pos_mem_sz at 0
// CHECK: %[[A20:.*]], %[[A21:.*]] = sparse_tensor.push_back %[[A18]], %[[A12]], %[[A2]]