Commit 79b9d41

[mlir][sparse] Generalize sparse encoding in check tests (#67476)
For all the mlir tests (except roundtrip_coding.mlir), change the CHECK lines to use the general form of the encoding, `#sparse_tensor.encoding<{{{.*}}}>`, instead of a concrete encoding such as `#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>`.
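In FileCheck syntax, `{{` and `}}` delimit an inline regular expression, so the generalized pattern `#sparse_tensor.encoding<{{{.*}}}>` matches any brace-enclosed encoding body, keeping the tests independent of the attribute's printed form. A representative before/after pair, taken verbatim from the first file below (gpu_sampled_matmul_lib.mlir):

-// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>,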
1 parent 32e10aa commit 79b9d41

28 files changed (+621, -641 lines)

mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir

Lines changed: 8 additions & 8 deletions
@@ -22,12 +22,12 @@
 #CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

 // CHECK-LABEL: func.func @sparse_sampled_dd(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>,
 // CHECK-SAME: %[[VAL_1:.*]]: tensor<8x8xf64>,
-// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> {
 // CHECK: %[[VAL_3:.*]] = arith.constant 8 : index
 // CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
 // CHECK: %[[VAL_7:.*]] = gpu.wait async
 // CHECK: %[[VAL_8:.*]], %[[VAL_9:.*]] = gpu.alloc async {{\[}}%[[VAL_7]]] () : memref<8x8xf64>
@@ -36,9 +36,9 @@
 // CHECK: %[[VAL_12:.*]] = gpu.wait async
 // CHECK: %[[VAL_13:.*]], %[[VAL_14:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] () : memref<8x8xf64>
 // CHECK: %[[VAL_15:.*]] = gpu.memcpy async {{\[}}%[[VAL_14]]] %[[VAL_13]], %[[VAL_11]] : memref<8x8xf64>, memref<8x8xf64>
-// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref<?xf64>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
 // CHECK: %[[VAL_19:.*]] = gpu.wait async
 // CHECK: %[[VAL_20:.*]] = memref.dim %[[VAL_16]], %[[VAL_4]] : memref<?xindex>
 // CHECK: %[[VAL_21:.*]], %[[VAL_22:.*]] = gpu.alloc async {{\[}}%[[VAL_19]]] (%[[VAL_20]]) : memref<?xindex>
@@ -70,8 +70,8 @@
 // CHECK: %[[VAL_57:.*]] = gpu.memcpy async {{\[}}%[[VAL_56]]] %[[VAL_18]], %[[VAL_31]] : memref<?xf64>, memref<?xf64>
 // CHECK: %[[VAL_58:.*]] = gpu.dealloc async {{\[}}%[[VAL_57]]] %[[VAL_31]] : memref<?xf64>
 // CHECK: gpu.wait {{\[}}%[[VAL_58]]]
-// CHECK: %[[VAL_59:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
-// CHECK: return %[[VAL_59]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
+// CHECK: %[[VAL_59:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: return %[[VAL_59]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: }
 //
 // A kernel that computes a direct sampled matrix matrix multiplication

mlir/test/Dialect/SparseTensor/codegen.mlir

Lines changed: 4 additions & 4 deletions
@@ -664,7 +664,7 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
 }

 // CHECK-LABEL: func.func @sparse_new_coo(
-// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>) {
+// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{{{.*}}}>>) {
 // CHECK-DAG: %[[A1:.*]] = arith.constant false
 // CHECK-DAG: %[[A2:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[A3:.*]] = arith.constant 0 : index
@@ -684,7 +684,7 @@ func.func @sparse_convert_element_type(%arg0: tensor<32xf32, #SparseVector>) ->
 // CHECK: %[[A13:.*]] = memref.cast %[[A12]] : memref<2xindex> to memref<?xindex>
 // CHECK: %[[A14:.*]] = memref.alloc(%[[A11]]) : memref<?xindex>
 // CHECK: %[[A15:.*]] = memref.alloc(%[[A10]]) : memref<?xf32>
-// CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>
+// CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A16]] lvl_sz at 0 with %[[A8]]
 // CHECK: %[[A19:.*]] = sparse_tensor.storage_specifier.get %[[A18]] pos_mem_sz at 0
 // CHECK: %[[A21:.*]], %[[A22:.*]] = sparse_tensor.push_back %[[A19]], %[[A13]], %[[A3]]
@@ -712,7 +712,7 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #Coo> {
 }

 // CHECK-LABEL: func.func @sparse_new_coo_permute_no(
-// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>) {
+// CHECK-SAME: %[[A0:.*]]: !llvm.ptr<i8>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{{{.*}}}>>) {
 // CHECK-DAG: %[[A1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[A2:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[A3:.*]] = arith.constant 2 : index
@@ -731,7 +731,7 @@ func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #Coo> {
 // CHECK: %[[A12:.*]] = memref.cast %[[A11]] : memref<2xindex> to memref<?xindex>
 // CHECK: %[[A13:.*]] = memref.alloc(%[[A10]]) : memref<?xindex>
 // CHECK: %[[A14:.*]] = memref.alloc(%[[A9]]) : memref<?xf32>
-// CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>
+// CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: %[[A17:.*]] = sparse_tensor.storage_specifier.set %[[A15]] lvl_sz at 0 with %[[A8]]
 // CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.get %[[A17]] pos_mem_sz at 0
 // CHECK: %[[A20:.*]], %[[A21:.*]] = sparse_tensor.push_back %[[A18]], %[[A12]], %[[A2]]

mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir

Lines changed: 2 additions & 2 deletions
@@ -111,7 +111,7 @@ func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100
 // CHECK: return %[[T]] : !llvm.ptr<i8>

 // CHECK-RWT-LABEL: func.func @sparse_convert_2d(
-// CHECK-RWT-SAME: %[[T0:.*]]: tensor<2x4xf64>) -> tensor<2x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
+// CHECK-RWT-SAME: %[[T0:.*]]: tensor<2x4xf64>) -> tensor<2x4xf64, #sparse_tensor.encoding<{{{.*}}}>> {
 // CHECK-RWT: %[[T1:.*]] = bufferization.alloc_tensor()
 // CHECK-RWT: %[[T2:.*]] = sparse_tensor.foreach in %[[T0]] init(%[[T1]])
 // CHECK-RWT: ^bb0(%[[L0I0:.*]]: index, %[[L0I1:.*]]: index, %[[L0V:.*]]: f64, %[[L0T:.*]]: tensor
@@ -162,7 +162,7 @@ func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #CSR> {
 // CHECK: call @delSparseTensorCOOF32(%[[C]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>

-// CHECK-RWT-LABEL: func.func @sparse_constant() -> tensor<8x7xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
+// CHECK-RWT-LABEL: func.func @sparse_constant() -> tensor<8x7xf32, #sparse_tensor.encoding<{{{.*}}}>> {
 // CHECK-RWT: %[[F0:.*]] = arith.constant sparse<{{\[\[}}0, 0], [1, 6]], [1.000000e+00, 5.000000e+00]> : tensor<8x7xf32>
 // CHECK-RWT: %[[T0:.*]] = bufferization.alloc_tensor()
 // CHECK-RWT: %[[T1:.*]] = sparse_tensor.foreach in %[[F0]] init(%[[T0]])

mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir

Lines changed: 2 additions & 2 deletions
@@ -144,7 +144,7 @@ func.func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<
 // CHECK: return %[[T]] : tensor<2x4xf64>

 // CHECK-RWT-LABEL: func.func @sparse_convert_2d(
-// CHECK-RWT-SAME: %[[A:.*]]: tensor<2x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>) -> tensor<2x4xf64> {
+// CHECK-RWT-SAME: %[[A:.*]]: tensor<2x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<2x4xf64> {
 // CHECK-RWT: %[[F0:.*]] = arith.constant 0.000000e+00 : f64
 // CHECK-RWT: %[[B:.*]] = memref.alloc() : memref<2x4xf64>
 // CHECK-RWT: linalg.fill ins(%[[F0]] : f64) outs(%[[B]]
@@ -300,7 +300,7 @@ func.func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tens
 // CHECK: return %[[T]] : tensor<?x?xf64>

 // CHECK-RWT-LABEL: func.func @sparse_convert_2d_dyn2(
-// CHECK-RWT-SAME: %[[A:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>) -> tensor<?x?xf64> {
+// CHECK-RWT-SAME: %[[A:.*]]: tensor<?x?xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<?x?xf64> {
 // CHECK-RWT-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-RWT-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-RWT-DAG: %[[F0:.*]] = arith.constant 0.000000e+00 : f64

mlir/test/Dialect/SparseTensor/one_trip.mlir

Lines changed: 4 additions & 4 deletions
@@ -13,15 +13,15 @@
 }

 // CHECK-LABEL: func.func @sparse_scale(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>>)
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1xf32, #sparse_tensor.encoding<{{{.*}}}>>)
 // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK: %[[VAL_3:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>> to memref<?xf32>
+// CHECK: %[[VAL_3:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
 // CHECK: %[[VAL_4:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<?xf32>
 // CHECK: %[[VAL_5:.*]] = arith.mulf %[[VAL_4]], %[[VAL_2]] : f32
 // CHECK: memref.store %[[VAL_5]], %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<?xf32>
-// CHECK: %[[VAL_6:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>>
-// CHECK: return %[[VAL_6]] : tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: return %[[VAL_6]] : tensor<1x1xf32, #sparse_tensor.encoding<{{{.*}}}>>
 func.func @sparse_scale(%argx: tensor<1x1xf32, #Dense>) -> tensor<1x1xf32, #Dense> {
   %c = arith.constant 2.0 : f32
   %0 = linalg.generic #trait_scale

mlir/test/Dialect/SparseTensor/rejected.mlir

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@

 // CHECK-LABEL: func.func @sparse_reduction_subi(
 // CHECK-SAME: %[[VAL_0:.*]]: tensor<i32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<i32> {
 // CHECK: %[[VAL_2:.*]] = linalg.generic
 // CHECK: ^bb0(%[[VAL_3:.*]]: i32, %[[VAL_4:.*]]: i32):
 // CHECK: %[[VAL_5:.*]] = arith.subi %[[VAL_3]], %[[VAL_4]] : i32

mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir

Lines changed: 7 additions & 7 deletions
@@ -14,8 +14,8 @@
 }>

 // CHECK-LABEL: func.func @sparse_new(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
 // CHECK: bufferization.dealloc_tensor %[[COO]]
 // CHECK: return %[[R]]
@@ -25,8 +25,8 @@ func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSR> {
 }

 // CHECK-LABEL: func.func @sparse_new_csc(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
 // CHECK: bufferization.dealloc_tensor %[[COO]]
 // CHECK: return %[[R]]
@@ -36,16 +36,16 @@ func.func @sparse_new_csc(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSC> {
 }

 // CHECK-LABEL: func.func @sparse_new_coo(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
 // CHECK: return %[[COO]]
 func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #COO> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #COO>
   return %0 : tensor<?x?xf32, #COO>
 }

 // CHECK-LABEL: func.func @sparse_out(
-// CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>,
+// CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{{{.*}}}>>,
 // CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>) {
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
