
Commit c1ac9a0

[mlir][sparse] Finish migrating integration tests to use sparse_tensor.print (#84997)
1 parent dcd9f49

19 files changed: +670 additions, -533 deletions
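
The same migration pattern repeats across the files below: the hand-rolled dump helpers (sparse_tensor.values copied into a vector and printed) are removed, the entry point is renamed from @entry to @main to match the new -e main RUN option, and verification goes through sparse_tensor.print instead. A minimal before/after sketch of the pattern; %sparse, %c0, and %d0 are placeholder names for illustration, not taken from any one test:

    // Before: extract the values buffer and print it as a fixed-size vector.
    %v = sparse_tensor.values %sparse : tensor<2x4xf64, #CSR> to memref<?xf64>
    %0 = vector.transfer_read %v[%c0], %d0 : memref<?xf64>, vector<8xf64>
    vector.print %0 : vector<8xf64>

    // After: print the whole storage scheme (nse, dim, lvl, pos, crd, values).
    sparse_tensor.print %sparse : tensor<2x4xf64, #CSR>

Printing the full storage scheme is what the new CHECK blocks in the diffs below verify, instead of matching a flat values vector.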

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir

Lines changed: 165 additions & 202 deletions
Large diffs are not rendered by default.

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_block.mlir

Lines changed: 43 additions & 27 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -52,21 +52,10 @@
 // Integration test that tests conversions between sparse tensors.
 //
 module {
-  //
-  // Output utilities.
-  //
-  func.func @dumpf64(%arg0: memref<?xf64>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f64
-    %0 = vector.transfer_read %arg0[%c0], %d0: memref<?xf64>, vector<8xf64>
-    vector.print %0 : vector<8xf64>
-    return
-  }
-
   //
   // Main driver.
   //
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c2 = arith.constant 2 : index
@@ -88,20 +77,47 @@ module {
     %3 = sparse_tensor.convert %1 : tensor<2x4xf64, #BSR> to tensor<2x4xf64, #CSR>
     %4 = sparse_tensor.convert %1 : tensor<2x4xf64, #BSR> to tensor<2x4xf64, #CSC>
 
-    %v1 = sparse_tensor.values %1 : tensor<2x4xf64, #BSR> to memref<?xf64>
-    %v2 = sparse_tensor.values %2 : tensor<2x4xf64, #BSR> to memref<?xf64>
-    %v3 = sparse_tensor.values %3 : tensor<2x4xf64, #CSR> to memref<?xf64>
-    %v4 = sparse_tensor.values %4 : tensor<2x4xf64, #CSC> to memref<?xf64>
-
-
-    // CHECK: ( 1, 2, 5, 6, 3, 4, 7, 8 )
-    // CHECK-NEXT: ( 1, 2, 5, 6, 3, 4, 7, 8 )
-    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8 )
-    // CHECK-NEXT: ( 1, 5, 2, 6, 3, 7, 4, 8 )
-    call @dumpf64(%v1) : (memref<?xf64>) -> ()
-    call @dumpf64(%v2) : (memref<?xf64>) -> ()
-    call @dumpf64(%v3) : (memref<?xf64>) -> ()
-    call @dumpf64(%v4) : (memref<?xf64>) -> ()
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 1, 2, 2, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 2
+    // CHECK-NEXT: crd[1] : ( 0, 1
+    // CHECK-NEXT: values : ( 1, 2, 5, 6, 3, 4, 7, 8
+    // CHECK-NEXT: ----
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 1, 2, 2, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 2
+    // CHECK-NEXT: crd[1] : ( 0, 1
+    // CHECK-NEXT: values : ( 1, 2, 5, 6, 3, 4, 7, 8
+    // CHECK-NEXT: ----
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 2, 4 )
+    // CHECK-NEXT: pos[1] : ( 0, 4, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: ----
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 8
+    // CHECK-NEXT: dim = ( 2, 4 )
+    // CHECK-NEXT: lvl = ( 4, 2 )
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8
+    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1
+    // CHECK-NEXT: values : ( 1, 5, 2, 6, 3, 7, 4, 8
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %1 : tensor<2x4xf64, #BSR>
+    sparse_tensor.print %2 : tensor<2x4xf64, #BSR>
+    sparse_tensor.print %3 : tensor<2x4xf64, #CSR>
+    sparse_tensor.print %4 : tensor<2x4xf64, #CSC>
 
     // TODO: Fix memory leaks.
     bufferization.dealloc_tensor %1 : tensor<2x4xf64, #BSR>
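
The lvl = ( 1, 2, 2, 2 ) lines above come from the 2x2-blocked view of the 2x4 tensor: one block row, two block columns, and dense 2x2 blocks, with only the block-column level compressed (hence a single pos[1]/crd[1] pair). Below is a sketch of an encoding that yields such a layout; the actual #BSR attribute in the test sits outside the rendered hunks, so treat this as an assumption:

    // Assumed 2x2 block-sparse-row encoding (not taken from the diff).
    #BSR = #sparse_tensor.encoding<{
      map = (i, j) -> (
        i floordiv 2 : dense,       // block row:        level size 1 for dim 2
        j floordiv 2 : compressed,  // block column:     level size 2, yields pos[1]/crd[1]
        i mod 2      : dense,       // row within block:  level size 2
        j mod 2      : dense        // col within block:  level size 2
      )
    }>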

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir

Lines changed: 73 additions & 49 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -44,19 +44,7 @@
 // may change (the actual underlying sizes obviously never change).
 //
 module {
-
-  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
-
-  //
-  // Helper method to print values array. The transfer actually
-  // reads more than required to verify size of buffer as well.
-  //
-  func.func @dump(%arg0: memref<?xf64>) {
-    call @printMemref1dF64(%arg0) : (memref<?xf64>) -> ()
-    return
-  }
-
-  func.func @entry() {
+  func.func @main() {
     %t1 = arith.constant sparse<
       [ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
       [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>
@@ -72,45 +60,81 @@ module {
     %5 = sparse_tensor.convert %3 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64, #DCSC>
     %6 = sparse_tensor.convert %4 : tensor<?x?xf64, #DCSC> to tensor<?x?xf64, #DCSR>
 
-    //
-    // Check number_of_entries.
     //
-    // CHECK-COUNT-6: 7
-    %n1 = sparse_tensor.number_of_entries %1 : tensor<?x?xf64, #DCSR>
-    %n2 = sparse_tensor.number_of_entries %2 : tensor<?x?xf64, #DCSC>
-    %n3 = sparse_tensor.number_of_entries %3 : tensor<?x?xf64, #DCSR>
-    %n4 = sparse_tensor.number_of_entries %4 : tensor<?x?xf64, #DCSC>
-    %n5 = sparse_tensor.number_of_entries %5 : tensor<?x?xf64, #DCSC>
-    %n6 = sparse_tensor.number_of_entries %6 : tensor<?x?xf64, #DCSR>
-    vector.print %n1 : index
-    vector.print %n2 : index
-    vector.print %n3 : index
-    vector.print %n4 : index
-    vector.print %n5 : index
-    vector.print %n6 : index
-
+    // Verify the outputs.
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
     //
-    // All proper row-/column-wise?
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 64, 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 63
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 31, 0, 1, 0, 31
+    // CHECK-NEXT: values : ( 1, 4, 6, 2, 5, 3, 7
+    // CHECK-NEXT: ----
     //
-    // CHECK: [1, 2, 3, 4, 5, 6, 7
-    // CHECK: [1, 4, 6, 2, 5, 3, 7
-    // CHECK: [1, 2, 3, 4, 5, 6, 7
-    // CHECK: [1, 4, 6, 2, 5, 3, 7
-    // CHECK: [1, 4, 6, 2, 5, 3, 7
-    // CHECK: [1, 2, 3, 4, 5, 6, 7
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 7
+    // CHECK-NEXT: dim = ( 32, 64 )
+    // CHECK-NEXT: lvl = ( 32, 64 )
+    // CHECK-NEXT: pos[0] : ( 0, 3
+    // CHECK-NEXT: crd[0] : ( 0, 1, 31
+    // CHECK-NEXT: pos[1] : ( 0, 3, 5, 7
+    // CHECK-NEXT: crd[1] : ( 0, 1, 63, 0, 1, 0, 63
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+    // CHECK-NEXT: ----
     //
-    %m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m3 = sparse_tensor.values %3 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %m4 = sparse_tensor.values %4 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m5 = sparse_tensor.values %5 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m6 = sparse_tensor.values %6 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    call @dump(%m1) : (memref<?xf64>) -> ()
-    call @dump(%m2) : (memref<?xf64>) -> ()
-    call @dump(%m3) : (memref<?xf64>) -> ()
-    call @dump(%m4) : (memref<?xf64>) -> ()
-    call @dump(%m5) : (memref<?xf64>) -> ()
-    call @dump(%m6) : (memref<?xf64>) -> ()
+    sparse_tensor.print %1 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %2 : tensor<?x?xf64, #DCSC>
+    sparse_tensor.print %3 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %4 : tensor<?x?xf64, #DCSC>
+    sparse_tensor.print %5 : tensor<?x?xf64, #DCSC>
+    sparse_tensor.print %6 : tensor<?x?xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %1 : tensor<?x?xf64, #DCSR>
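
As a reading aid, the first printed tensor above maps back to the 7-entry 32x64 constant defined in the test, whose nonzeros sit in rows 0, 1, and 31. The #DCSR attribute itself is defined outside the rendered hunks; the encoding below is the usual doubly compressed row-major layout and is assumed here:

    // Assumed doubly compressed row-major encoding (DCSR), not taken from the diff.
    #DCSR = #sparse_tensor.encoding<{
      map = (i, j) -> (i : compressed, j : compressed)
    }>

    // How the printed arrays encode that constant:
    //   pos[0] = ( 0, 3 )                    three stored rows in total
    //   crd[0] = ( 0, 1, 31 )                their row coordinates
    //   pos[1] = ( 0, 3, 5, 7 )              row 0 owns entries [0,3), row 1 [3,5), row 31 [5,7)
    //   crd[1] = ( 0, 1, 63, 0, 1, 0, 63 )   column coordinate of each stored entry
    //   values = ( 1, 2, 3, 4, 5, 6, 7 )     the matching nonzero values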

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir

Lines changed: 2 additions & 2 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -55,7 +55,7 @@ module {
   //
   // The first test suite (for non-singleton LevelTypes).
   //
-  func.func @entry() {
+  func.func @main() {
     //
    // Initialize a 3-dim dense tensor.
    //