Commit fb35cd3

[mlir][NFC] Update textual references of func to func.func in SparseTensor tests
The special-case parsing of `func` operations is being removed.
1 parent: 0254b0b
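
To make the change concrete, here is the first hunk of fold.mlir below rendered as a before/after pair; the `#SparseVector` encoding alias is defined earlier in that test file and is not shown here. Only the spelling of the operation changes: the sugared `func` form, previously accepted by the special-case parser, becomes the fully qualified `func.func` form.

// Before: relies on the special-case parser shorthand for `func`.
func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// After: uses the dialect-qualified operation name.
func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}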

31 files changed: 225 additions & 225 deletions

mlir/test/Dialect/SparseTensor/conversion.mlir

Lines changed: 34 additions & 34 deletions
Large diffs are not rendered by default.

mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir

Lines changed: 7 additions & 7 deletions
@@ -47,7 +47,7 @@
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<13xi32>
 // CHECK: return %[[T]] : tensor<13xi32>
-func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
+func.func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
   %0 = sparse_tensor.convert %arg0 : tensor<13xi32, #SparseVector> to tensor<13xi32>
   return %0 : tensor<13xi32>
 }
@@ -86,7 +86,7 @@ func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32>
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?xi32>
 // CHECK: return %[[T]] : tensor<?xi32>
-func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
+func.func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
   return %0 : tensor<?xi32>
 }
@@ -130,7 +130,7 @@ func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x4xf64>
 // CHECK: return %[[T]] : tensor<2x4xf64>
-func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
+func.func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64, #SparseMatrix> to tensor<2x4xf64>
   return %0 : tensor<2x4xf64>
 }
@@ -174,7 +174,7 @@ func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x4xf64>
 // CHECK: return %[[T]] : tensor<?x4xf64>
-func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
+func.func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
   return %0 : tensor<?x4xf64>
 }
@@ -218,7 +218,7 @@ func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x?xf64>
 // CHECK: return %[[T]] : tensor<2x?xf64>
-func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
+func.func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
   return %0 : tensor<2x?xf64>
 }
@@ -262,7 +262,7 @@ func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x?xf64>
 // CHECK: return %[[T]] : tensor<?x?xf64>
-func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
+func.func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
   return %0 : tensor<?x?xf64>
 }
@@ -311,7 +311,7 @@ func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x
 // CHECK: }
 // CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x3x4xf64>
 // CHECK: return %[[T]] : tensor<2x3x4xf64>
-func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
+func.func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseTensor> to tensor<2x3x4xf64>
   return %0 : tensor<2x3x4xf64>
 }

mlir/test/Dialect/SparseTensor/dense.mlir

Lines changed: 4 additions & 4 deletions
@@ -56,7 +56,7 @@
 // CHECK: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32x16xf32>
 // CHECK: return %[[VAL_16]] : tensor<32x16xf32>
 // CHECK: }
-func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
+func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
              %argx: tensor<32x16xf32> {linalg.inplaceable = false})
              -> tensor<32x16xf32> {
   %c = arith.constant 1.0 : f32
@@ -97,7 +97,7 @@ func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
 // CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32x16xf32>
 // CHECK: return %[[VAL_15]] : tensor<32x16xf32>
 // CHECK: }
-func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
+func.func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
              %argx: tensor<32x16xf32> {linalg.inplaceable = true})
              -> tensor<32x16xf32> {
   %c = arith.constant 1.0 : f32
@@ -138,7 +138,7 @@ func @dense2(%arga: tensor<32x16xf32, #DenseMatrix>,
 // CHECK: %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK: return %[[VAL_15]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK: }
-func @dense3(%arga: tensor<32x16xf32>,
+func.func @dense3(%arga: tensor<32x16xf32>,
              %argx: tensor<32x16xf32, #DenseMatrix> {linalg.inplaceable = true})
              -> tensor<32x16xf32, #DenseMatrix> {
   %c = arith.constant 1.0 : f32
@@ -186,7 +186,7 @@ func @dense3(%arga: tensor<32x16xf32>,
 // CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK: return %[[VAL_20]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK: }
-func @dense4(%arga: tensor<32x16x8xf32>,
+func.func @dense4(%arga: tensor<32x16x8xf32>,
              %argx: tensor<32x16xf32, #DenseMatrix> {linalg.inplaceable = true})
              -> tensor<32x16xf32, #DenseMatrix> {
   %0 = linalg.generic #trait_3d

mlir/test/Dialect/SparseTensor/fold.mlir

Lines changed: 3 additions & 3 deletions
@@ -6,7 +6,7 @@
 // CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
 // CHECK-NOT: sparse_tensor.convert
 // CHECK: return %[[A]] : tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>
-func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
   return %0 : tensor<64xf32, #SparseVector>
 }
@@ -15,7 +15,7 @@ func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32,
 // CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
 // CHECK-NOT: sparse_tensor.convert
 // CHECK: return
-func @sparse_dce_convert(%arg0: tensor<64xf32>) {
+func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
   return
 }
@@ -26,7 +26,7 @@ func @sparse_dce_convert(%arg0: tensor<64xf32>) {
 // CHECK-NOT: sparse_tensor.indices
 // CHECK-NOT: sparse_tensor.values
 // CHECK: return
-func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
+func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
   %c = arith.constant 0 : index
   %0 = sparse_tensor.pointers %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
   %1 = sparse_tensor.indices %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
