
Commit b6ca602

[mlir][sparse] migrate tests to sparse_tensor.print (#84055)
Continuing the efforts started in #83357
1 parent 64faa52 commit b6ca602
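
The pattern is the same across the migrated tests: hand-rolled dump helpers (convert the sparse tensor to dense, cast, call the runner-utils printers, then print the entry count and the raw values buffer) are replaced by a single sparse_tensor.print, and the test entry point is renamed from @entry to @main. A minimal before/after sketch, using a hypothetical doubly compressed encoding #SparseMat in place of the tests' #MAT_C_C:

  // Illustrative only: #SparseMat stands in for the tests' #MAT_C_C encoding.
  #SparseMat = #sparse_tensor.encoding<{
    map = (i, j) -> (i : compressed, j : compressed)
  }>

  module {
    func.func private @printMemrefF64(%ptr : tensor<*xf64>)
    func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }

    // Before: densify and call runner-utils printers, then separately print
    // the number of stored entries and the raw values buffer.
    func.func @dump_before(%A: tensor<9x4xf64, #SparseMat>) {
      %c = sparse_tensor.convert %A : tensor<9x4xf64, #SparseMat> to tensor<9x4xf64>
      %u = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
      call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
      %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #SparseMat>
      vector.print %n : index
      %v = sparse_tensor.values %A : tensor<9x4xf64, #SparseMat> to memref<?xf64>
      call @printMemref1dF64(%v) : (memref<?xf64>) -> ()
      bufferization.dealloc_tensor %c : tensor<9x4xf64>
      return
    }

    // After: one op prints the whole storage scheme
    // (nse, dim, lvl, per-level pos/crd, values).
    func.func @dump_after(%A: tensor<9x4xf64, #SparseMat>) {
      sparse_tensor.print %A : tensor<9x4xf64, #SparseMat>
      return
    }
  }

Besides shortening the tests, printing the stored format directly drops the temporary dense tensor and its deallocation, and lets the CHECK lines verify the actual storage (positions, coordinates, values) rather than a densified copy.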

File tree: 5 files changed, +183 -234 lines changed


mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir

Lines changed: 35 additions & 50 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -51,11 +51,6 @@
 
 module {
   func.func private @printMemrefF64(%ptr : tensor<*xf64>)
-  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }
-
-  //
-  // Tests without permutation.
-  //
 
   // Concats all sparse matrices (with different encodings) to a sparse matrix.
   func.func @concat_sparse_sparse(%arg0: tensor<2x4xf64, #MAT_C_C>, %arg1: tensor<3x4xf64, #MAT_C_D>, %arg2: tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C> {
@@ -85,30 +80,15 @@ module {
     return %0 : tensor<9x4xf64>
   }
 
-  func.func @dump_mat_9x4(%A: tensor<9x4xf64, #MAT_C_C>) {
-    %c = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C> to tensor<9x4xf64>
-    %cu = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_C_C>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_C_C> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    bufferization.dealloc_tensor %c : tensor<9x4xf64>
-    return
-  }
-
+  // Outputs dense matrix.
   func.func @dump_mat_dense_9x4(%A: tensor<9x4xf64>) {
     %u = tensor.cast %A : tensor<9x4xf64> to tensor<*xf64>
     call @printMemrefF64(%u) : (tensor<*xf64>) -> ()
-
     return
   }
 
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m24 = arith.constant dense<
       [ [ 1.0, 0.0, 3.0, 0.0],
         [ 0.0, 2.0, 0.0, 0.0] ]> : tensor<2x4xf64>
@@ -126,22 +106,24 @@ module {
     %sm34cd = sparse_tensor.convert %m34 : tensor<3x4xf64> to tensor<3x4xf64, #MAT_C_D>
     %sm44dc = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C>
 
-    // CHECK: {{\[}}[1, 0, 3, 0],
-    // CHECK-NEXT: [0, 2, 0, 0],
-    // CHECK-NEXT: [1, 0, 1, 1],
-    // CHECK-NEXT: [0, 0.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 1.5, 1],
-    // CHECK-NEXT: [0, 3.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: ----
+    //
     %0 = call @concat_sparse_sparse(%sm24cc, %sm34cd, %sm44dc)
       : (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
-    call @dump_mat_9x4(%0) : (tensor<9x4xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %0 : tensor<9x4xf64, #MAT_C_C>
 
-    // CHECK: {{\[}}[1, 0, 3, 0],
+    //
+    // CHECK: {{\[}}[1, 0, 3, 0],
     // CHECK-NEXT: [0, 2, 0, 0],
     // CHECK-NEXT: [1, 0, 1, 1],
     // CHECK-NEXT: [0, 0.5, 0, 0],
@@ -150,26 +132,29 @@ module {
    // CHECK-NEXT: [0, 3.5, 0, 0],
    // CHECK-NEXT: [1, 5, 2, 0],
    // CHECK-NEXT: [1, 0.5, 0, 0]]
+    //
     %1 = call @concat_sparse_dense(%sm24cc, %sm34cd, %sm44dc)
       : (tensor<2x4xf64, #MAT_C_C>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
     call @dump_mat_dense_9x4(%1) : (tensor<9x4xf64>) -> ()
 
-    // CHECK: {{\[}}[1, 0, 3, 0],
-    // CHECK-NEXT: [0, 2, 0, 0],
-    // CHECK-NEXT: [1, 0, 1, 1],
-    // CHECK-NEXT: [0, 0.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 1.5, 1],
-    // CHECK-NEXT: [0, 3.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1,
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5,
+    // CHECK-NEXT: ----
+    //
     %2 = call @concat_mix_sparse(%m24, %sm34cd, %sm44dc)
       : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
-    call @dump_mat_9x4(%2) : (tensor<9x4xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %2 : tensor<9x4xf64, #MAT_C_C>
 
-    // CHECK: {{\[}}[1, 0, 3, 0],
+    //
+    // CHECK: {{\[}}[1, 0, 3, 0],
     // CHECK-NEXT: [0, 2, 0, 0],
     // CHECK-NEXT: [1, 0, 1, 1],
     // CHECK-NEXT: [0, 0.5, 0, 0],
@@ -178,11 +163,11 @@ module {
    // CHECK-NEXT: [0, 3.5, 0, 0],
    // CHECK-NEXT: [1, 5, 2, 0],
    // CHECK-NEXT: [1, 0.5, 0, 0]]
+    //
     %3 = call @concat_mix_dense(%m24, %sm34cd, %sm44dc)
      : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
     call @dump_mat_dense_9x4(%3) : (tensor<9x4xf64>) -> ()
 
-
     // Release resources.
     bufferization.dealloc_tensor %sm24cc : tensor<2x4xf64, #MAT_C_C>
     bufferization.dealloc_tensor %sm34cd : tensor<3x4xf64, #MAT_C_D>
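
The new CHECK lines above read the compressed storage directly: pos[1][i] to pos[1][i+1] delimits row i's entries in crd[1] and values, so row 0 of the 9x4 result has columns 0 and 2 with values 1 and 3, i.e. the [1, 0, 3, 0] that the old dense CHECK matrix spelled out. A small self-contained sketch of the same printing pattern (the #CC encoding, the 2x3 data, and the expected-output comment are illustrative, not part of this commit):

  // Illustrative doubly compressed encoding, like the test's #MAT_C_C.
  #CC = #sparse_tensor.encoding<{
    map = (i, j) -> (i : compressed, j : compressed)
  }>

  module {
    func.func @main() {
      // 2x3 example with three stored entries: (0,0)=1, (0,2)=2, (1,2)=3.
      %d = arith.constant dense<
        [ [ 1.0, 0.0, 2.0 ],
          [ 0.0, 0.0, 3.0 ] ]> : tensor<2x3xf64>
      %s = sparse_tensor.convert %d : tensor<2x3xf64> to tensor<2x3xf64, #CC>

      // Prints, in the format checked above (spacing abbreviated):
      //   ---- Sparse Tensor ----
      //   nse = 3
      //   dim = ( 2, 3 )
      //   lvl = ( 2, 3 )
      //   pos[0] : ( 0, 2, ...     both rows are nonempty
      //   crd[0] : ( 0, 1, ...     stored row coordinates
      //   pos[1] : ( 0, 2, 3, ...  per-row offsets into crd[1]/values
      //   crd[1] : ( 0, 2, 2, ...  column coordinates
      //   values : ( 1, 2, 3, ...
      //   ----
      sparse_tensor.print %s : tensor<2x3xf64, #CC>

      bufferization.dealloc_tensor %s : tensor<2x3xf64, #CC>
      return
    }
  }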

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_block_matmul.mlir

Lines changed: 66 additions & 72 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -45,7 +45,6 @@
   map = ( i, j ) -> (i : dense, j : compressed)
 }>
 
-
 #BSR = #sparse_tensor.encoding<{
   map = ( i, j ) ->
     ( i floordiv 2 : dense,
@@ -65,67 +64,66 @@
 
 module {
 
-func.func @mul(%arg0: tensor<4x8xf64>,
-               %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #BSR>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
-
-func.func @mul_24(%arg0: tensor<4x8xf64>,
-                  %arg1: tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #NV_24>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
+func.func @mul(%arg0: tensor<4x8xf64>,
+               %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
+  %out = arith.constant dense<0.0> : tensor<4x4xf64>
+  %0 = linalg.generic #trait_mul
+    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #BSR>)
+    outs(%out: tensor<4x4xf64>) {
+      ^bb(%x: f64, %y : f64, %z : f64):
+        %1 = arith.mulf %x, %y : f64
+        %2 = arith.addf %1, %z : f64
+        linalg.yield %2 : f64
+  } -> tensor<4x4xf64>
+  return %0 : tensor<4x4xf64>
+}
 
-func.func @mul_csr_bsr(%arg0: tensor<4x8xf64, #CSR>,
-                       %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
+func.func @mul_24(%arg0: tensor<4x8xf64>,
+                  %arg1: tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64> {
+  %out = arith.constant dense<0.0> : tensor<4x4xf64>
+  %0 = linalg.generic #trait_mul
+    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64, #NV_24>)
+    outs(%out: tensor<4x4xf64>) {
+      ^bb(%x: f64, %y : f64, %z : f64):
+        %1 = arith.mulf %x, %y : f64
+        %2 = arith.addf %1, %z : f64
+        linalg.yield %2 : f64
+  } -> tensor<4x4xf64>
+  return %0 : tensor<4x4xf64>
+}
 
-func.func @mul_dense(%arg0: tensor<4x8xf64>,
-                     %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
-  %out = arith.constant dense<0.0> : tensor<4x4xf64>
-  %0 = linalg.generic #trait_mul
-    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
-    outs(%out: tensor<4x4xf64>) {
-      ^bb(%x: f64, %y : f64, %z : f64):
-        %1 = arith.mulf %x, %y : f64
-        %2 = arith.addf %1, %z : f64
-        linalg.yield %2 : f64
-  } -> tensor<4x4xf64>
-  return %0 : tensor<4x4xf64>
-}
+func.func @mul_csr_bsr(%arg0: tensor<4x8xf64, #CSR>,
+                       %arg1: tensor<4x8xf64, #BSR>) -> tensor<4x4xf64> {
+  %out = arith.constant dense<0.0> : tensor<4x4xf64>
+  %0 = linalg.generic #trait_mul
+    ins(%arg0, %arg1: tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>)
+    outs(%out: tensor<4x4xf64>) {
+      ^bb(%x: f64, %y : f64, %z : f64):
+        %1 = arith.mulf %x, %y : f64
+        %2 = arith.addf %1, %z : f64
+        linalg.yield %2 : f64
+  } -> tensor<4x4xf64>
+  return %0 : tensor<4x4xf64>
+}
 
+func.func @mul_dense(%arg0: tensor<4x8xf64>,
+                     %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
+  %out = arith.constant dense<0.0> : tensor<4x4xf64>
+  %0 = linalg.generic #trait_mul
+    ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
+    outs(%out: tensor<4x4xf64>) {
+      ^bb(%x: f64, %y : f64, %z : f64):
+        %1 = arith.mulf %x, %y : f64
+        %2 = arith.addf %1, %z : f64
+        linalg.yield %2 : f64
+  } -> tensor<4x4xf64>
+  return %0 : tensor<4x4xf64>
+}
 
 //
-// Output utilities.
+// Output utility.
 //
-func.func @dumpf64(%arg0: tensor<4x4xf64>) {
+func.func @dump_dense_f64(%arg0: tensor<4x4xf64>) {
   %c0 = arith.constant 0 : index
   %d0 = arith.constant -1.0 : f64
   %0 = vector.transfer_read %arg0[%c0, %c0], %d0: tensor<4x4xf64>, vector<4x4xf64>
@@ -136,36 +134,32 @@ func.func @mul_dense(%arg0: tensor<4x8xf64>,
 //
 // Main driver.
 //
-func.func @entry() {
+func.func @main() {
   %c0 = arith.constant 0 : index
-  %c1 = arith.constant 1 : index
-  %c2 = arith.constant 2 : index
-
 
   %td = arith.constant dense<[[ 1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 4.0, 5.0],
                               [ 6.0, 7.0, 0.0, 0.0, 0.0, 0.0, 10.0, 11.0],
                               [ 0.0, 0.0, 12.0, 13.0, 16.0, 17.0, 0.0, 0.0],
                               [ 0.0, 0.0, 18.0, 19.0, 22.0, 23.0, 0.0, 0.0]]> : tensor<4x8xf64>
 
-
-  %2 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #BSR>
-  %3 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #NV_24>
-  %4 = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
+  %a = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #BSR>
+  %b = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #NV_24>
+  %c = sparse_tensor.convert %td : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
 
   %d = call @mul_dense(%td, %td)
     : (tensor<4x8xf64>, tensor<4x8xf64>) -> tensor<4x4xf64>
-  %s = call @mul(%td, %2)
+  %s = call @mul(%td, %a)
    : (tensor<4x8xf64>, tensor<4x8xf64, #BSR>) -> tensor<4x4xf64>
-  %s24 = call @mul_24(%td, %3)
+  %s24 = call @mul_24(%td, %b)
    : (tensor<4x8xf64>, tensor<4x8xf64, #NV_24>) -> tensor<4x4xf64>
-  %scsr = call @mul_csr_bsr(%4, %2)
+  %scsr = call @mul_csr_bsr(%c, %a)
    : (tensor<4x8xf64, #CSR>, tensor<4x8xf64, #BSR>) -> tensor<4x4xf64>
 
   // CHECK-COUNT-4: ( ( 46, 115, 0, 0 ), ( 115, 306, 0, 0 ), ( 0, 0, 858, 1206 ), ( 0, 0, 1206, 1698 ) )
-  call @dumpf64(%d) : (tensor<4x4xf64>) -> ()
-  call @dumpf64(%s) : (tensor<4x4xf64>) -> ()
-  call @dumpf64(%s24) : (tensor<4x4xf64>) -> ()
-  call @dumpf64(%scsr) : (tensor<4x4xf64>) -> ()
+  call @dump_dense_f64(%d) : (tensor<4x4xf64>) -> ()
+  call @dump_dense_f64(%s) : (tensor<4x4xf64>) -> ()
+  call @dump_dense_f64(%s24) : (tensor<4x4xf64>) -> ()
+  call @dump_dense_f64(%scsr) : (tensor<4x4xf64>) -> ()
 
   return
 }
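
For reference on the expected result above: each kernel computes out[i, j] as the sum over k of A[i, k] * B[j, k] along the shared 8-wide dimension, so entry (0, 0) of the checked 4x4 output is 1*1 + 2*2 + 4*4 + 5*5 = 46, which matches the CHECK-COUNT-4 line. Below is a sketch of a #trait_mul consistent with that computation and with the @mul_dense kernel above; the indexing maps are an assumption, since the actual trait is defined earlier in the file and not shown in these hunks:

  // Assumed trait: out(i, j) += A(i, k) * B(j, k), i.e. A times B transposed.
  #trait_mul = {
    indexing_maps = [
      affine_map<(i, j, k) -> (i, k)>,  // A
      affine_map<(i, j, k) -> (j, k)>,  // B (accessed transposed)
      affine_map<(i, j, k) -> (i, j)>   // output
    ],
    iterator_types = ["parallel", "parallel", "reduction"]
  }

  // Dense reference kernel, as in the diff above.
  func.func @mul_dense(%arg0: tensor<4x8xf64>,
                       %arg1: tensor<4x8xf64>) -> tensor<4x4xf64> {
    %out = arith.constant dense<0.0> : tensor<4x4xf64>
    %0 = linalg.generic #trait_mul
      ins(%arg0, %arg1: tensor<4x8xf64>, tensor<4x8xf64>)
      outs(%out: tensor<4x4xf64>) {
        ^bb(%x: f64, %y : f64, %z : f64):
          %1 = arith.mulf %x, %y : f64
          %2 = arith.addf %1, %z : f64
          linalg.yield %2 : f64
    } -> tensor<4x4xf64>
    return %0 : tensor<4x4xf64>
  }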
