
Commit ea42b49

[mlir][bufferization] Switch tests to new deallocation pass pipeline (#66471)
Use the new ownership-based deallocation pass pipeline in the regression and integration tests. Some one-shot bufferization tests exercised one-shot bufferize and deallocation at the same time; I removed the deallocation pass from those tests because the deallocation pass is already thoroughly tested on its own.
1 parent 38b4df5 commit ea42b49

12 files changed, +54 −118 lines changed
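In practice the RUN-line change is mechanical: where a test only checks bufferization, the deallocation step is dropped; where deallocation is still wanted, the standalone `-buffer-deallocation` pass is replaced by the new `-buffer-deallocation-pipeline`. A minimal sketch of the before/after invocation, using the options that appear in these tests (test.mlir is a placeholder file name, not part of this commit):

# Old: run the standalone deallocation pass right after one-shot bufferization.
mlir-opt test.mlir -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -buffer-deallocation

# New: either drop deallocation from bufferization-only tests, or run the
# ownership-based pipeline where deallocation is actually under test.
mlir-opt test.mlir -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -buffer-deallocation-pipeline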

mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir

Lines changed: 3 additions & 8 deletions
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -buffer-deallocation -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s
 
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
@@ -14,20 +14,17 @@ func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32>
 // CHECK: %[[some_op:.*]] = "test.some_op"
 // CHECK: %[[alloc:.*]] = memref.alloc(%[[some_op]])
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
-// CHECK-NOT: dealloc
 // CHECK: scf.yield %[[casted]]
 %sz = "test.some_op"() : () -> (index)
 %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
 scf.yield %0 : tensor<?xf32>
 } else {
 // CHECK: } else {
-// CHECK: %[[cloned:.*]] = bufferization.clone %[[m]]
-// CHECK: scf.yield %[[cloned]]
+// CHECK: scf.yield %[[m]]
 scf.yield %t : tensor<?xf32>
 }
 // CHECK: }
 // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
-// CHECK: memref.dealloc %[[r]]
 // CHECK: return %[[r_tensor]]
 return %r : tensor<?xf32>
 }
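To make the change in this first test concrete: with the standalone deallocation pass, the else branch of the bufferized scf.if had to yield a clone of the incoming buffer so the branch result could later be deallocated unconditionally; without that pass, the incoming buffer is yielded directly. A rough illustration of the two shapes of the else branch (SSA names and the memref type are chosen for illustration only, not taken from the test output):

// Before (one-shot bufferize + -buffer-deallocation): yield a clone, dealloc the result later.
%cloned = bufferization.clone %m : memref<?xf32, strided<[?], offset: ?>> to memref<?xf32, strided<[?], offset: ?>>
scf.yield %cloned : memref<?xf32, strided<[?], offset: ?>>

// After (one-shot bufferize only): yield the incoming buffer as-is; no clone, no memref.dealloc.
scf.yield %m : memref<?xf32, strided<[?], offset: ?>>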
@@ -42,16 +39,14 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>,
 {
 // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
 // CHECK: %[[r:.*]] = scf.if {{.*}} {
-// CHECK: %[[clone:.*]] = bufferization.clone %[[arg0_m]]
-// CHECK: scf.yield %[[clone]]
+// CHECK: scf.yield %[[arg0_m]]
 // CHECK: } else {
 // CHECK: %[[alloc:.*]] = memref.alloc
 // CHECK: memref.store %{{.*}}, %[[alloc]]
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
 // CHECK: scf.yield %[[casted]]
 // CHECK: }
 // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
-// CHECK: memref.dealloc %[[r]]
 // CHECK: return %[[r_t]]
 %3 = scf.if %cond -> (tensor<i32>) {
 scf.yield %arg0 : tensor<i32>

mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 
 // RUN: mlir-opt %s \
 // RUN: -one-shot-bufferize="allow-unknown-ops create-deallocs=0" \
-// RUN: -buffer-deallocation | \
+// RUN: -buffer-deallocation-pipeline | \
 // RUN: FileCheck %s --check-prefix=CHECK-BUFFERDEALLOC
 
 // CHECK-NODEALLOC-LABEL: func @out_of_place_bufferization

mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir

Lines changed: 3 additions & 11 deletions
@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
 
 // Note: function-boundary-type-conversion=infer-layout-map with
 // promote-buffer-results-to-out-params is an unsupported combination.
@@ -18,7 +18,6 @@
 // CHECK: memref.store %{{.*}}, %[[alloc]]
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
 // CHECK: memref.copy %[[casted]], %[[arg1]]
-// CHECK: memref.dealloc %[[alloc]]
 // CHECK: return
 // CHECK: }
 
@@ -29,7 +28,6 @@
 // CHECK-NO-LAYOUT: memref.copy %[[arg0]], %[[alloc]]
 // CHECK-NO-LAYOUT: memref.store {{.*}}, %[[alloc]]
 // CHECK-NO-LAYOUT: memref.copy %[[alloc]], %[[arg1]]
-// CHECK-NO-LAYOUT: memref.dealloc %[[alloc]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 // CHECK-BASELINE-SAME: %[[arg0:.*]]: memref<5xf32, strided<[?], offset: ?>>) -> memref<5xf32> {
@@ -53,7 +51,6 @@ func.func @callee(%t: tensor<5xf32>) -> (tensor<5xf32>, tensor<5xf32>) {
 // CHECK: call @callee(%[[arg0]], %[[casted]])
 // CHECK: %[[l1:.*]] = memref.load %[[arg0]]
 // CHECK: %[[l2:.*]] = memref.load %[[casted]]
-// CHECK: memref.dealloc %[[alloc]]
 // CHECK: return %[[l1]], %[[l2]]
 // CHECK: }
 
@@ -78,7 +75,6 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 // CHECK: %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref<10x20xf32> to memref<2x5xf32, strided<[20, 1], offset: ?>>
 // CHECK: %[[casted:.*]] = memref.cast %[[subview]]
 // CHECK: memref.copy %[[casted]], %[[r]]
-// CHECK: memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT-LABEL: func @callee(
 // CHECK-NO-LAYOUT-SAME: %{{.*}}: index,
@@ -90,9 +86,7 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 // value and function signature.
 // CHECK-NO-LAYOUT: %[[alloc2:.*]] = memref.alloc() : memref<2x5xf32>
 // CHECK-NO-LAYOUT: memref.copy %[[subview]], %[[alloc2]]
-// CHECK-NO-LAYOUT: memref.dealloc %[[alloc]]
 // CHECK-NO-LAYOUT: memref.copy %[[alloc2]], %[[r]]
-// CHECK-NO-LAYOUT: memref.dealloc %[[alloc2]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 // CHECK-BASELINE-SAME: %{{.*}}: index) -> memref<2x5xf32, strided<[20, 1], offset: ?>> {
@@ -110,13 +104,11 @@ func.func @callee(%idx: index) -> tensor<2x5xf32> {
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<2x5xf32> to memref<2x5xf32, strided<[?, ?], offset: ?>>
 // CHECK: call @callee(%{{.*}}, %[[casted]])
 // CHECK: memref.load %[[casted]]
-// CHECK: memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT: func @main(
 // CHECK-NO-LAYOUT: %[[alloc:.*]] = memref.alloc() : memref<2x5xf32>
 // CHECK-NO-LAYOUT: call @callee(%{{.*}}, %[[alloc]])
 // CHECK-NO-LAYOUT: memref.load %[[alloc]]
-// CHECK-NO-LAYOUT: memref.dealloc
 
 // CHECK-BASELINE: func @main(
 // CHECK-BASELINE: %[[call:.*]] = call @callee
