
Commit 7d77aa4

yinying-lisa-li authored and zahiraam committed
[mlir][sparse] Migrate tests to use new syntax (llvm#66543)
**COO**
- `lvlTypes = [ "compressed_nu", "singleton" ]` to `map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)`
- `lvlTypes = [ "compressed_nu_no", "singleton_no" ]` to `map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))`

**SortedCOO**
- `lvlTypes = [ "compressed_nu", "singleton" ]` to `map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)`

**BCOO**
- `lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]` to `map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)`

**BCSR**
- `lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)>` to `map = ( i, j ) -> ( i floordiv 2 : compressed, j floordiv 3 : compressed, i mod 2 : dense, j mod 3 : dense )`

**Tensor and other supported formats (e.g. CCC, CDC, CCCC)**

Currently, ELL and slice are not yet supported in the new syntax; the CHECK tests will be updated once printing is set to output the new syntax.

Previous PRs: llvm#66146, llvm#66309, llvm#66443
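For reference, a minimal before/after sketch of the migration applied throughout the tests below, using the SortedCOO mapping from the description above (the `_old`/`_new` attribute names are illustrative only and do not appear in the tests):

```mlir
// Old syntax: level types listed separately (plus an optional dimToLvl affine map).
#SortedCOO_old = #sparse_tensor.encoding<{
  lvlTypes = [ "compressed_nu", "singleton" ]
}>

// New syntax: a single map binds each dimension to its level type,
// with properties such as nonunique spelled out inline.
#SortedCOO_new = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>
```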
1 parent 70e9d70 commit 7d77aa4

58 files changed (+143, -146 lines)

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td

Lines changed: 7 additions & 3 deletions
@@ -200,7 +200,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",

// Sorted Coordinate Scheme.
#SortedCOO = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>
... tensor<?x?xf64, #SortedCOO> ...

@@ -214,8 +214,12 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",

// Block sparse row storage (2x3 blocks).
#BCSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed", "dense", "dense" ],
-  dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>
+  map = ( i, j ) ->
+  ( i floordiv 2 : compressed,
+    j floordiv 3 : compressed,
+    i mod 2 : dense,
+    j mod 3 : dense
+  )
}>
... tensor<20x30xf32, #BCSR> ...

mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
// RUN: --sparsification="enable-gpu-libgen" | FileCheck %s

#SortedCOO = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

module {

mlir/test/Dialect/SparseTensor/codegen.mlir

Lines changed: 5 additions & 7 deletions
@@ -27,7 +27,7 @@
}>

#UCSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed_no" ]
+  map = (d0, d1) -> (d0 : dense, d1 : compressed(nonordered))
}>

#CSC = #sparse_tensor.encoding<{
@@ -41,21 +41,19 @@
}>

#Dense3D = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "dense", "dense" ],
-  dimToLvl = affine_map<(i, j, k) -> (k, i, j)>
+  map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense)
}>

#Coo = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#CooPNo = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton_no" ],
-  dimToLvl = affine_map<(i, j) -> (j, i)>
+  map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton(nonordered))
}>

#ccoo = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed_nu", "singleton" ]
+  map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed(nonunique), d2 : singleton)
}>

// CHECK-LABEL: func @sparse_nop(

mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s

#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
-#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed_nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func.func @sparse_alloc_copy_CSR(
// CHECK-SAME: %[[VAL_0:.*0]]: memref<?xindex>,

mlir/test/Dialect/SparseTensor/conversion.mlir

Lines changed: 1 addition & 2 deletions
@@ -25,8 +25,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed", "compressed"],
-  dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
+  map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_nop(

mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir

Lines changed: 1 addition & 2 deletions
@@ -15,8 +15,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed", "compressed"],
-  dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
+  map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_convert_1d(

mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir

Lines changed: 1 addition & 2 deletions
@@ -12,8 +12,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
-  lvlTypes = ["dense", "compressed", "compressed"],
-  dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
+  map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// CHECK-LABEL: func @sparse_convert_1d(

mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir

Lines changed: 5 additions & 6 deletions
@@ -26,17 +26,16 @@
}>

#SortedCOO2D = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton),
}>

#SortedCOO3D = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
+  map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton)

}>

#TsssPermuted = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed", "compressed" ],
-  dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
+  map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed)
}>

#COOSlice = #sparse_tensor.encoding<{
@@ -115,13 +114,13 @@ func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32
}

#SparseSingleton64 = #sparse_tensor.encoding<{
-  lvlTypes = ["singleton"],
+  map = (d0) -> (d0 : singleton),
  posWidth = 64,
  crdWidth = 64
}>

#SparseSingleton32 = #sparse_tensor.encoding<{
-  lvlTypes = ["singleton"],
+  map = (d0) -> (d0 : singleton),
  posWidth = 32,
  crdWidth = 32
}>

mlir/test/Dialect/SparseTensor/invalid.mlir

Lines changed: 4 additions & 4 deletions
@@ -32,7 +32,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coord

// -----

-#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}>

func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
    -> tensor<100x2xf64, #SparseVector> {
@@ -68,7 +68,7 @@ func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: ten

// -----

-#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}>

func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
  // expected-error@+1 {{input/output trailing COO level-ranks don't match}}
@@ -270,7 +270,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>)

// -----

-#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index {
  // expected-error@+1 {{requested position memory size on a singleton level}}
@@ -658,7 +658,7 @@ func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>,

#C = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
-#DCC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed", "compressed"]}>
+#DCC = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed)}>
func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>,
                                        %arg1: tensor<3x4xf64, #DC>,
                                        %arg2: tensor<4x4x4xf64, #DCC>) -> tensor<9x4xf64, #DC> {

mlir/test/Dialect/SparseTensor/pre_rewriting.mlir

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
}>

#SortedCOO = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#DCSR = #sparse_tensor.encoding<{

mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
}>

#COO = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// CHECK-LABEL: func.func @sparse_new(

mlir/test/Dialect/SparseTensor/roundtrip.mlir

Lines changed: 2 additions & 2 deletions
@@ -77,7 +77,7 @@ func.func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32,

// -----

-#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>
+#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }>

// CHECK-LABEL: func @sparse_convert_3d_from_sparse(
// CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>)
@@ -103,7 +103,7 @@ func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xi

// -----

-#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func @sparse_indices_buffer(
// CHECK-SAME: %[[A:.*]]: tensor<?x?xf64, #{{.*}}>)

mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir

Lines changed: 6 additions & 2 deletions
@@ -85,8 +85,12 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
// -----

#BCSR = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed", "dense", "dense" ],
-  dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>
+  map = ( i, j ) ->
+  ( i floordiv 2 : compressed,
+    j floordiv 3 : compressed,
+    i mod 2 : dense,
+    j mod 3 : dense
+  )
}>

// CHECK-LABEL: func private @sparse_bcsr(

mlir/test/Dialect/SparseTensor/sorted_coo.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s

#SortedCOO = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#trait_scale = {

mlir/test/Dialect/SparseTensor/sparse_2d.mlir

Lines changed: 1 addition & 1 deletion
@@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
}

#BatchedVector = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed_hi" ],
+  map = (d0, d1) -> (d0 : dense, d1 : compressed(high))
}>
// CHECK-LABEL: func.func @sub_ss_batched(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,

mlir/test/Dialect/SparseTensor/sparse_3d.mlir

Lines changed: 8 additions & 8 deletions
@@ -3,14 +3,14 @@

#Td = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }>

-#Tddd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }>
-#Tdds = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>
-#Tdsd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>
-#Tdss = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>
-#Tsdd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>
-#Tsds = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>
-#Tssd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>
-#Tsss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>
+#Tddd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : dense) }>
+#Tdds = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }>
+#Tdsd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense) }>
+#Tdss = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed) }>
+#Tsdd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : dense) }>
+#Tsds = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : compressed) }>
+#Tssd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : dense) }>
+#Tsss = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }>

#trait3 = {
  indexing_maps = [

mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparsification --canonicalize --cse | FileCheck %s

#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
-#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>
+#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }>

#trait = {
  indexing_maps = [

mlir/test/Dialect/SparseTensor/sparse_foreach.mlir

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) {
}

#BCOO = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ],
+  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
}>

// CHECK-LABEL: func.func @foreach_bcoo(

mlir/test/Dialect/SparseTensor/sparse_nd.mlir

Lines changed: 4 additions & 2 deletions
@@ -5,8 +5,10 @@
// but an acyclic iteration graph using sparse constraints only.

#SparseTensor = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "dense", "dense", "compressed",
-               "compressed", "dense", "dense", "dense" ]
+  map = (d0, d1, d2, d3,
+         d4, d5, d6, d7) -> (d0 : dense, d1 : dense, d2 : dense,
+                             d3 : compressed, d4 : compressed, d5 : dense,
+                             d6 : dense, d7 : dense)
}>

#trait_mul = {

mlir/test/Dialect/SparseTensor/sparse_out.mlir

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
}>

#SparseTensor = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed", "compressed" ]
+  map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)
}>

#trait_scale_inpl = {

mlir/test/Dialect/SparseTensor/sparse_pack.mlir

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --canonicalize --post-sparsification-rewrite="enable-runtime-library=false" --sparse-tensor-codegen -cse --canonicalize | FileCheck %s

#COO = #sparse_tensor.encoding<{
-  lvlTypes = ["compressed_nu", "singleton"],
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton),
  crdWidth=32
}>

mlir/test/Dialect/SparseTensor/sparse_perm.mlir

Lines changed: 1 addition & 2 deletions
@@ -2,8 +2,7 @@
// RUN: mlir-opt %s -sparsification | FileCheck %s

#X = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "dense", "dense" ],
-  dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
+  map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense)
}>

#trait = {

mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir

Lines changed: 1 addition & 2 deletions
@@ -4,8 +4,7 @@
// RUN: FileCheck %s --check-prefix=CHECK-MIR

#X = #sparse_tensor.encoding<{
-  lvlTypes = [ "dense", "dense", "dense" ],
-  dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
+  map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense)
}>

#trait = {

mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir

Lines changed: 2 additions & 2 deletions
@@ -4,8 +4,8 @@
//
// RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s

-#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_2D = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }>
+#COO_3D = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton), posWidth = 32, crdWidth = 32 }>


// CHECK-LABEL: func.func @sparse_reshape_fused(

mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir

Lines changed: 2 additions & 2 deletions
@@ -15,8 +15,8 @@
}

#VEC = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 32, crdWidth = 32 }>
-#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }>
+#COO = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }>
+#CCC = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed), posWidth = 32, crdWidth = 32 }>

//
// This kernel can be sparsified as all unsparsifiable operations'

mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir

Lines changed: 2 additions & 2 deletions
@@ -33,8 +33,8 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %}


-#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_2D = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }>
+#COO_3D = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton), posWidth = 32, crdWidth = 32 }>

module {
  func.func private @printMemref3dF32(%ptr : tensor<?x?x?xf32>) attributes { llvm.emit_c_interface }

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir

Lines changed: 3 additions & 5 deletions
@@ -43,17 +43,15 @@
}>

#SortedCOO = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ]
+  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

#SortedCOOPerm = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed_nu", "singleton" ],
-  dimToLvl = affine_map<(i,j) -> (j,i)>
+  map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton)
}>

#CCCPerm = #sparse_tensor.encoding<{
-  lvlTypes = [ "compressed", "compressed", "compressed"],
-  dimToLvl = affine_map<(d0, d1, d2) -> (d1, d2, d0)>
+  map = (d0, d1, d2) -> (d1 : compressed, d2 : compressed, d0 : compressed)
}>

module {
