Commit 256ac46
[mlir][sparse] Change tests to use new syntax for ELL and slice (#67569)

Examples:

1. ELL, from
   `#ELL = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)> }>`
   to
   `#ELL = #sparse_tensor.encoding<{ map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed) }>`

2. CSR slice, from
   `#CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>`
   to
   `#CSR_SLICE = #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed) }>`
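For orientation, here is a minimal use-site sketch of the new slice syntax. The encoding is copied verbatim from the tests in this commit; the function name @use_csr_slice is hypothetical:

  #CSR_SLICE = #sparse_tensor.encoding<{
    map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
  }>

  // Each #sparse_tensor<slice(offset, size, stride)> triple attaches to the
  // dimension it slices, and the level types now appear inline in the map.
  func.func private @use_csr_slice(tensor<?x?xf64, #CSR_SLICE>)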
Parent: a5b4ada

12 files changed, +54 −73 lines


mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td

Lines changed: 24 additions & 2 deletions
@@ -237,11 +237,33 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     }>
     ... tensor<20x30xf32, #BSR_explicit> ...

+    // ELL format.
+    // In the simple format for matrix, one array stores values and another
+    // array stores column indices. The arrays have the same number of rows
+    // as the original matrix, but only have as many columns as
+    // the maximum number of nonzeros on a row of the original matrix.
+    // There are many variants for ELL such as jagged diagonal scheme.
+    // To implement ELL, map provides a notion of "counting a
+    // dimension", where every stored element with the same coordinate
+    // is mapped to a new slice. For instance, ELL storage of a 2-d
+    // tensor can be defined with the mapping (i, j) -> (#i, i, j)
+    // using the notation of [Chou20]. Lacking the # symbol in MLIR's
+    // affine mapping, we use a free symbol c to define such counting,
+    // together with a constant that denotes the number of resulting
+    // slices. For example, the mapping [c](i, j) -> (c * 3 * i, i, j)
+    // with the level-types ["dense", "dense", "compressed"] denotes ELL
+    // storage with three jagged diagonals that count the dimension i.
+    #ELL = #sparse_tensor.encoding<{
+      map = [c](i, j) -> (c * 3 * i : dense, i : dense, j : compressed)
+    }>
+    ... tensor<?x?xf64, #ELL> ...
+
     // CSR slice (offset = 0, size = 4, stride = 1 on the first dimension;
     // offset = 0, size = 8, and a dynamic stride on the second dimension).
     #CSR_SLICE = #sparse_tensor.encoding<{
-      lvlTypes = [ "dense", "compressed" ],
-      dimSlices = [ (0, 4, 1), (0, 8, ?) ]
+      map = (i : #sparse_tensor<slice(0, 4, 1)>,
+             j : #sparse_tensor<slice(0, 8, ?)>) ->
+            (i : dense, j : compressed)
     }>
     ... tensor<?x?xf64, #CSC_SLICE> ...
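The documentation above names the counting symbol c and the dimensions (i, j); these names are interchangeable, and the roundtrip test later in this commit uses the canonical s0/d0/d1 spelling with a factor of 4 instead of 3. A sketch of that canonical form:

  #ELL = #sparse_tensor.encoding<{
    // s0 is the free symbol that counts the jagged diagonals.
    map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
  }>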

mlir/test/CAPI/sparse_tensor.c

Lines changed: 1 addition & 2 deletions
@@ -25,8 +25,7 @@ static int testRoundtripEncoding(MlirContext ctx) {
   // clang-format off
   const char *originalAsm =
       "#sparse_tensor.encoding<{ "
-      "lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], "
-      "dimToLvl = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
+      "map = [s0](d0, d1) -> (s0 : dense, d0 : compressed, d1 : compressed), "
       "posWidth = 32, crdWidth = 64 }>";
   // clang-format on
   MlirAttribute originalAttr =

mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir

Lines changed: 1 addition & 2 deletions
@@ -39,8 +39,7 @@
 }>

 #COOSlice = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (2, 2, 1), (12, 13, 1) ]
+  map = (d0 : #sparse_tensor<slice(2, 2, 1)>, d1 : #sparse_tensor<slice(12, 13, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 // CHECK-LABEL: func @sparse_nop_convert(

mlir/test/Dialect/SparseTensor/invalid.mlir

Lines changed: 3 additions & 6 deletions
@@ -201,8 +201,7 @@ func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {

@@ -214,8 +213,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {

@@ -400,8 +398,7 @@ func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
 // -----

 #CSR = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed"],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {

mlir/test/Dialect/SparseTensor/invalid_encoding.mlir

Lines changed: 1 addition & 2 deletions
@@ -218,8 +218,7 @@ func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}}
+  map = (d0 : #sparse_tensor<slice(-1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) // expected-error{{expect positive value or ? for slice offset/size/stride}}
 }>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
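For contrast, a slice specification is valid when every unknown entry is spelled ? rather than a negative value; a sketch mirroring the roundtrip tests elsewhere in this commit:

  #SparseVector_Slice = #sparse_tensor.encoding<{
    // Fully dynamic offset/size/stride: resolved at runtime, not parse time.
    map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
  }>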

mlir/test/Dialect/SparseTensor/pre_rewriting.mlir

Lines changed: 1 addition & 2 deletions
@@ -13,8 +13,7 @@
 }>

 #Slice = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (?, 1, 1), (?, 3, 1) ]
+  map = (d0 : #sparse_tensor<slice(?, 1, 1)>, d1 : #sparse_tensor<slice(?, 3, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 #sel_trait = {

mlir/test/Dialect/SparseTensor/roundtrip.mlir

Lines changed: 5 additions & 10 deletions
@@ -143,8 +143,7 @@ func.func @sparse_values(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64>
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 // CHECK-LABEL: func @sparse_slice_offset(

@@ -159,8 +158,7 @@ func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index {
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 // CHECK-LABEL: func @sparse_slice_stride(

@@ -188,8 +186,7 @@ func.func @sparse_metadata_init() -> !sparse_tensor.storage_specifier<#SparseVec

 #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
 #SparseVector_Slice = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed"],
-  dimSlices = [ (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
 }>

 // CHECK-LABEL: func @sparse_metadata_init(

@@ -220,8 +217,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)
 // -----

 #SparseVector_Slice = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed"],
-  dimSlices = [ (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
 }>

 // CHECK-LABEL: func @sparse_get_md(

@@ -237,8 +233,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector_S
 // -----

 #SparseVector = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed"],
-  dimSlices = [ (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed)
 }>

 // CHECK-LABEL: func @sparse_get_md(

mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir

Lines changed: 3 additions & 17 deletions
@@ -101,8 +101,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
 // -----

 #ELL = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "dense", "compressed" ],
-  dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)>
+  map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
 }>

 // CHECK-LABEL: func private @sparse_ell(

@@ -112,8 +111,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 // CHECK-LABEL: func private @sparse_slice(

@@ -123,19 +121,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 // -----

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
-}>
-
-// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
-func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
-
-// -----
-
-#CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, ?, 1), (?, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 // CHECK-LABEL: func private @sparse_slice(

mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir

Lines changed: 1 addition & 2 deletions
@@ -5,8 +5,7 @@
 }>

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : dense, d1 : compressed)
 }>

 // CHECK-LABEL: func.func @sparse_slice(
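To connect the slice triples to an operation: the (offset, size, stride) entries line up with the offsets, sizes, and strides of a tensor.extract_slice whose result carries the slice encoding. A sketch assuming an 8x8 source (the shapes and the function name @extract are illustrative; #CSR stands for the dense/compressed encoding defined at the top of this test file):

  func.func @extract(%t : tensor<8x8xf64, #CSR>) -> tensor<4x8xf64, #CSR_SLICE> {
    // offsets [0, 0], sizes [4, 8], strides [1, 1] correspond to
    // slice(0, 4, 1) on d0 and slice(0, 8, 1) on d1.
    %s = tensor.extract_slice %t[0, 0] [4, 8] [1, 1]
        : tensor<8x8xf64, #CSR> to tensor<4x8xf64, #CSR_SLICE>
    return %s : tensor<4x8xf64, #CSR_SLICE>
  }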

mlir/test/Dialect/SparseTensor/sparse_foreach.mlir

Lines changed: 2 additions & 4 deletions
@@ -29,13 +29,11 @@ func.func @sparse_foreach_constant() -> () {
 }

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (0, 4, 1), (2, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(2, 4, 1)>) -> (d0 : compressed, d1 : compressed)
 }>

 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed, d1 : compressed)
 }>
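Both encodings above are consumed by sparse_tensor.foreach, which visits the stored elements of a slice; a sketch with illustrative shapes and function name:

  func.func @dump_slice(%a : tensor<4x4xf64, #CSR_SLICE>) {
    // Iterate over all stored (i, j, value) triples of the slice.
    sparse_tensor.foreach in %a : tensor<4x4xf64, #CSR_SLICE> do {
    ^bb0(%i : index, %j : index, %v : f64):
      vector.print %v : f64
    }
    return
  }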

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir

Lines changed: 4 additions & 8 deletions
@@ -28,27 +28,23 @@
 }>

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed)
 }>

 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : dense, d1 : compressed)
 }>

 #COO = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 #COO_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (1, 4, 1), (1, 4, 2) ]
+  map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 #COO_SLICE_DYN = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (?, ?, ?), (?, ?, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, ?, ?)>, d1 : #sparse_tensor<slice(?, ?, ?)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir

Lines changed: 8 additions & 16 deletions
@@ -27,51 +27,43 @@
 }>

 #DCSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : compressed, d1 : compressed)
 }>

 #CSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)
 }>

 #CSR_SLICE = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 1), (0, 8, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 1)>, d1 : #sparse_tensor<slice(0, 8, 1)>) -> (d0 : dense, d1 : compressed)
 }>

 #COO = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 #CSR_SLICE_1 = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(0, 4, 1)>) -> (d0 : dense, d1 : compressed)
 }>

 #DCSR_SLICE_1 = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(1, 4, 1)>) -> (d0 : compressed, d1 : compressed)
 }>

 #COO_SLICE_1 = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (0, 4, 2), (0, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(0, 4, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 #COO_SLICE_2 = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimSlices = [ (0, 4, 2), (1, 4, 1) ]
+  map = (d0 : #sparse_tensor<slice(0, 4, 2)>, d1 : #sparse_tensor<slice(1, 4, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
 }>

 #CSR_SLICE_dyn = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed" ],
-  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, 4, ?)>, d1 : #sparse_tensor<slice(?, 4, ?)>) -> (d0 : dense, d1 : compressed)
 }>

 #DCSR_SLICE_dyn = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed" ],
-  dimSlices = [ (?, 4, ?), (?, 4, ?) ]
+  map = (d0 : #sparse_tensor<slice(?, 4, ?)>, d1 : #sparse_tensor<slice(?, 4, ?)>) -> (d0 : compressed, d1 : compressed)
 }>

 module {
