
Commit 92d38ad

[mlir][NFC] Update textual references of func to func.func in Linalg tests
The special-case parsing of `func` operations is being removed.
Parent: 412b885
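For illustration only (the function name `@example` below is hypothetical, not taken from the commit), the change amounts to writing the fully qualified operation name of the `func` dialect instead of the bare `func` keyword that the parser used to special-case; function bodies are untouched:

// Old spelling, relied on special-case parsing of `func`:
func @example(%arg0: f32) -> f32 {
  return %arg0 : f32
}

// New spelling, the full operation name in the `func` dialect:
func.func @example(%arg0: f32) -> f32 {
  return %arg0 : f32
}

Because only the leading keyword changes, every test loses exactly as many lines as it gains, which matches the +751/-751 totals below and the NFC tag in the title.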

File tree: 84 files changed, 751 additions (+), 751 deletions (-)

Note: this is a large commit; only a subset of the changed files is reproduced below.


mlir/test/Dialect/Linalg/affine.mlir

Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
 // Test that we can lower all the way to LLVM without crashing, don't check results here.
 // RUN: mlir-opt %s -convert-linalg-to-affine-loops -convert-linalg-to-llvm -o=/dev/null 2>&1
 
-func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
+func.func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
   %c0 = arith.constant 0 : index
   %c1 = arith.constant 1 : index
   %A = memref.view %arg0[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
@@ -17,7 +17,7 @@ func @matmul(%arg0: memref<?xi8>, %M: index, %N: index, %K: index) {
 //----------------------------------------------------------------------------//
 // Named ops to loops.
 //----------------------------------------------------------------------------//
-func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
+func.func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
   linalg.batch_matmul ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
                       outs(%C : memref<?x?x?xf32>)
   return

mlir/test/Dialect/Linalg/bubble-up-extract-slice-op.mlir

Lines changed: 6 additions & 6 deletions

@@ -1,6 +1,6 @@
 //RUN: mlir-opt -test-linalg-transform-patterns=test-bubble-up-extract-slice-op-pattern -split-input-file %s | FileCheck %s
 
-func @dynamic(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>, %arg2: index, %arg3: index, %arg4: index, %arg5:index) -> tensor<?x?xf32> {
+func.func @dynamic(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>, %arg2: index, %arg3: index, %arg4: index, %arg5:index) -> tensor<?x?xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -27,7 +27,7 @@ func @dynamic(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>, %arg2: index, %arg3:
 
 //-----
 
-func @static(%arg0: tensor<16x8xf32>, %arg1: tensor<8xf32>) -> tensor<4x2xf32> {
+func.func @static(%arg0: tensor<16x8xf32>, %arg1: tensor<8xf32>) -> tensor<4x2xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -54,7 +54,7 @@ func @static(%arg0: tensor<16x8xf32>, %arg1: tensor<8xf32>) -> tensor<4x2xf32> {
 
 //-----
 
-func @mixed(%arg0: tensor<?x8xf32>, %arg1: tensor<8xf32>, %arg2: index, %arg3: index) -> tensor<?x2xf32> {
+func.func @mixed(%arg0: tensor<?x8xf32>, %arg1: tensor<8xf32>, %arg2: index, %arg3: index) -> tensor<?x2xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -81,7 +81,7 @@ func @mixed(%arg0: tensor<?x8xf32>, %arg1: tensor<8xf32>, %arg2: index, %arg3: i
 
 //-----
 
-func @dynamic_to_static(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<4x2xf32> {
+func.func @dynamic_to_static(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<4x2xf32> {
   %0 = linalg.generic {
     indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                      affine_map<(d0, d1) -> (d1)>,
@@ -108,7 +108,7 @@ func @dynamic_to_static(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<
 
 //-----
 
-func @matmul_slice() -> tensor<2x2xf32> {
+func.func @matmul_slice() -> tensor<2x2xf32> {
   %lhs = arith.constant dense<1.0> : tensor<4x4xf32>
   %rhs = arith.constant dense<1.0> : tensor<4x4xf32>
   %dst = arith.constant dense<[[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0], [12.0, 13.0, 14.0, 15.0]]> : tensor<4x4xf32>
@@ -126,7 +126,7 @@ func @matmul_slice() -> tensor<2x2xf32> {
 
 //-----
 
-func @conv_slice(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>) -> tensor<1x32x32x16xf32> {
+func.func @conv_slice(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>) -> tensor<1x32x32x16xf32> {
   %c112 = arith.constant 112 : index
   %c32 = arith.constant 32 : index
   %c16 = arith.constant 16 : index

mlir/test/Dialect/Linalg/bufferize.mlir

Lines changed: 7 additions & 7 deletions

@@ -23,7 +23,7 @@
 // CHECK: }
 // CHECK: %[[RESULT:.*]] = bufferization.to_tensor %[[RESULT_MEMREF]] : memref<4xf32>
 // CHECK: return %[[RESULT]] : tensor<4xf32>
-func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
+func.func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0],
     iterator_types = ["parallel"]
@@ -51,7 +51,7 @@ func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
 // CHECK: linalg.generic
 // CHECK-SAME: ins(%[[MEMREF]] : memref<?xf32>)
 // CHECK-SAME: outs(%[[OUT_BUF]] : memref<?xf32>) {
-func @init_tensor(%in : tensor<?xf32>, %size: index) -> tensor<?xf32> {
+func.func @init_tensor(%in : tensor<?xf32>, %size: index) -> tensor<?xf32> {
   %init = linalg.init_tensor [%size] : tensor<?xf32>
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0],
@@ -77,7 +77,7 @@ func @init_tensor(%in : tensor<?xf32>, %size: index) -> tensor<?xf32> {
 // CHECK-SAME: ins(%{{.*}} : memref<4xf32>)
 // CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref<4xf32>, memref<4xf32>)
 // CHECK-NEXT: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: f32):
-func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
+func.func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
   %0, %1 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
     iterator_types = ["parallel"]
@@ -109,7 +109,7 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
 // CHECK: linalg.generic
 // CHECK-SAME: ins(%[[MEMREF_ARG]] : memref<?x?xf32>)
 // CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref<?x?xf32>, memref<?x?xf32>)
-func @dynamic_results(%arg0: tensor<?x?xf32>)
+func.func @dynamic_results(%arg0: tensor<?x?xf32>)
     -> (tensor<?x?xf32>, tensor<?x?xf32>) {
   %0, %1 = linalg.generic {
     indexing_maps = [#map_2d, #map_2d, #map_2d],
@@ -147,7 +147,7 @@ func @dynamic_results(%arg0: tensor<?x?xf32>)
 // CHECK: linalg.generic
 // CHECK-SAME: ins(%[[ARG0_MEMREF]] : memref<2x3x4xvector<3x4xi4>>)
 // CHECK-SAME: outs(%[[INIT_BUFFER]] : memref<3x2xf32>) {
-func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
+func.func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
     %arg1: tensor<3x2xf32>) -> (tensor<3x2xf32>) {
 
   %0 = linalg.generic #trait
@@ -164,7 +164,7 @@ func @generic_with_init_tensor(%arg0: tensor<2x3x4xvector<3x4xi4>>,
 
 // CHECK-LABEL: func @bufferize_fill(
 // CHECK-SAME: %[[IN:.*]]: tensor<?xf32>
-func @bufferize_fill(%arg0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @bufferize_fill(%arg0: tensor<?xf32>) -> tensor<?xf32> {
   %c0 = arith.constant 0.0 : f32
   // CHECK: %[[ALLOC:.*]] = memref.alloc
   // CHECK: linalg.fill ins(%cst : f32) outs(%[[ALLOC]] : memref<?xf32>)
@@ -177,7 +177,7 @@ func @bufferize_fill(%arg0: tensor<?xf32>) -> tensor<?xf32> {
 // -----
 
 // CHECK-LABEL: func @bufferize_dot
-func @bufferize_dot(%in: tensor<4xf32>, %out: tensor<f32>) -> tensor<f32> {
+func.func @bufferize_dot(%in: tensor<4xf32>, %out: tensor<f32>) -> tensor<f32> {
   %dot = linalg.dot ins(%in, %in : tensor<4xf32>, tensor<4xf32>)
                     outs(%out : tensor<f32>) -> tensor<f32>
   return %dot : tensor<f32>

mlir/test/Dialect/Linalg/canonicalize-duplicate-inputs.mlir

Lines changed: 4 additions & 4 deletions

@@ -6,7 +6,7 @@
 
 // CHECK: #[[$MAP:.*]] = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: @basic
-func @basic(%arg0: tensor<?xf32>) -> tensor<?xf32> {
+func.func @basic(%arg0: tensor<?xf32>) -> tensor<?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP]], #[[$MAP]]]
   // CHECK: attrs = {someattr}
   // CHECK: ^bb0(%[[BBARG:.*]]: f32, %{{.*}}: f32):
@@ -32,7 +32,7 @@ func @basic(%arg0: tensor<?xf32>) -> tensor<?xf32> {
 // CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1, d0)>
 // CHECK-LABEL: @distinct_affine_maps
-func @distinct_affine_maps(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @distinct_affine_maps(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP0]], #[[$MAP1]], #[[$MAP0]]]
   %0 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
     ins(%arg0, %arg0 : tensor<?x?xf32>, tensor<?x?xf32>)
@@ -55,7 +55,7 @@ func @distinct_affine_maps(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
 // CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1) -> (d1, d0)>
 // CHECK-LABEL: @mixed_redundant_non_redundant
-func @mixed_redundant_non_redundant(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
+func.func @mixed_redundant_non_redundant(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP0]], #[[$MAP1]], #[[$MAP0]]]
   // CHECK: ^bb0(%[[BBARG0:.*]]: f32, %[[BBARG1:.*]]: f32, %{{[a-zA-Z0-9]+}}: f32):
   // CHECK: "test.elementwise_mappable"(%[[BBARG0]], %[[BBARG1]], %[[BBARG0]])
@@ -77,7 +77,7 @@ func @mixed_redundant_non_redundant(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
 
 // CHECK: #[[$MAP:.*]] = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: @multiple_different_redundant_args
-func @multiple_different_redundant_args(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+func.func @multiple_different_redundant_args(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
   // CHECK: linalg.generic{{.*}}[#[[$MAP]], #[[$MAP]], #[[$MAP]]]
   // CHECK: ^bb0(%[[BBARG0:.*]]: f32, %[[BBARG1:.*]]: f32, %{{[a-zA-Z0-9]+}}: f32):
   // CHECK: "test.elementwise_mappable"(%[[BBARG0]], %[[BBARG1]], %[[BBARG0]], %[[BBARG1]])
