
Commit 3d51010

Revert "[mlir][bufferization] Switch tests to new deallocation pass pipeline (#66471)"
This reverts commit ea42b49. Some GPU integration tests are failing, which I didn't observe locally. Reverting until I have a fix.
Parent: 2dbdc9f

12 files changed, +118 -54 lines changed

mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
Lines changed: 8 additions & 3 deletions

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -buffer-deallocation -canonicalize -split-input-file | FileCheck %s
 
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
@@ -14,17 +14,20 @@ func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32>
 // CHECK: %[[some_op:.*]] = "test.some_op"
 // CHECK: %[[alloc:.*]] = memref.alloc(%[[some_op]])
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
+// CHECK-NOT: dealloc
 // CHECK: scf.yield %[[casted]]
 %sz = "test.some_op"() : () -> (index)
 %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
 scf.yield %0 : tensor<?xf32>
 } else {
 // CHECK: } else {
-// CHECK: scf.yield %[[m]]
+// CHECK: %[[cloned:.*]] = bufferization.clone %[[m]]
+// CHECK: scf.yield %[[cloned]]
 scf.yield %t : tensor<?xf32>
 }
 // CHECK: }
 // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
+// CHECK: memref.dealloc %[[r]]
 // CHECK: return %[[r_tensor]]
 return %r : tensor<?xf32>
 }
@@ -39,14 +42,16 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>,
 {
 // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
 // CHECK: %[[r:.*]] = scf.if {{.*}} {
-// CHECK: scf.yield %[[arg0_m]]
+// CHECK: %[[clone:.*]] = bufferization.clone %[[arg0_m]]
+// CHECK: scf.yield %[[clone]]
 // CHECK: } else {
 // CHECK: %[[alloc:.*]] = memref.alloc
 // CHECK: memref.store %{{.*}}, %[[alloc]]
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
 // CHECK: scf.yield %[[casted]]
 // CHECK: }
 // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
+// CHECK: memref.dealloc %[[r]]
 // CHECK: return %[[r_t]]
 %3 = scf.if %cond -> (tensor<i32>) {
 scf.yield %arg0 : tensor<i32>
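For reference, the updated CHECK lines in @buffer_not_deallocated correspond roughly to IR of the following shape. This is a hand-written sketch of what -one-shot-bufferize followed by the restored -buffer-deallocation pass is expected to produce, not output taken from the commit; the types are simplified and the memref.cast that the real test checks for is left out.

// Sketch only: approximate bufferized + deallocated form of the first test.
func.func @buffer_not_deallocated(%t: tensor<?xf32>, %c: i1) -> tensor<?xf32> {
  %m = bufferization.to_memref %t : memref<?xf32>
  %r = scf.if %c -> (memref<?xf32>) {
    %sz = "test.some_op"() : () -> (index)
    // Fresh allocation; no dealloc in this branch because the buffer
    // escapes through scf.yield.
    %alloc = memref.alloc(%sz) : memref<?xf32>
    scf.yield %alloc : memref<?xf32>
  } else {
    // buffer-deallocation clones the caller-owned buffer so that both
    // branches yield a buffer the parent block may free.
    %cloned = bufferization.clone %m : memref<?xf32> to memref<?xf32>
    scf.yield %cloned : memref<?xf32>
  }
  %r_tensor = bufferization.to_tensor %r : memref<?xf32>
  // The yielded buffer is freed after its last use.
  memref.dealloc %r : memref<?xf32>
  return %r_tensor : tensor<?xf32>
}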

mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-compat.mlir
Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@
 
 // RUN: mlir-opt %s \
 // RUN: -one-shot-bufferize="allow-unknown-ops create-deallocs=0" \
-// RUN: -buffer-deallocation-pipeline | \
+// RUN: -buffer-deallocation | \
 // RUN: FileCheck %s --check-prefix=CHECK-BUFFERDEALLOC
 
 // CHECK-NODEALLOC-LABEL: func @out_of_place_bufferization
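The only change in this file is which deallocation mechanism runs after One-Shot Bufferize. Shown side by side (an illustration assembled from the RUN lines above, not part of the commit), the reverted variant used the new deallocation pass pipeline, while the restored variant runs the older standalone pass:

// Reverted variant: new deallocation pass pipeline.
// RUN: mlir-opt %s \
// RUN:   -one-shot-bufferize="allow-unknown-ops create-deallocs=0" \
// RUN:   -buffer-deallocation-pipeline | \
// RUN: FileCheck %s --check-prefix=CHECK-BUFFERDEALLOC
//
// Restored variant: standalone buffer-deallocation pass.
// RUN: mlir-opt %s \
// RUN:   -one-shot-bufferize="allow-unknown-ops create-deallocs=0" \
// RUN:   -buffer-deallocation | \
// RUN: FileCheck %s --check-prefix=CHECK-BUFFERDEALLOC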

mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
Lines changed: 11 additions & 3 deletions

@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
 
 // Note: function-boundary-type-conversion=infer-layout-map with
 // promote-buffer-results-to-out-params is an unsupported combination.
@@ -18,6 +18,7 @@
 // CHECK: memref.store %{{.*}}, %[[alloc]]
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
 // CHECK: memref.copy %[[casted]], %[[arg1]]
+// CHECK: memref.dealloc %[[alloc]]
 // CHECK: return
 // CHECK: }
 
@@ -28,6 +29,7 @@
 // CHECK-NO-LAYOUT: memref.copy %[[arg0]], %[[alloc]]
 // CHECK-NO-LAYOUT: memref.store {{.*}}, %[[alloc]]
 // CHECK-NO-LAYOUT: memref.copy %[[alloc]], %[[arg1]]
+// CHECK-NO-LAYOUT: memref.dealloc %[[alloc]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 // CHECK-BASELINE-SAME: %[[arg0:.*]]: memref<5xf32, strided<[?], offset: ?>>) -> memref<5xf32> {
@@ -51,6 +53,7 @@ func.func @callee(%t: tensor<5xf32>) -> (tensor<5xf32>, tensor<5xf32>) {
 // CHECK: call @callee(%[[arg0]], %[[casted]])
 // CHECK: %[[l1:.*]] = memref.load %[[arg0]]
 // CHECK: %[[l2:.*]] = memref.load %[[casted]]
+// CHECK: memref.dealloc %[[alloc]]
 // CHECK: return %[[l1]], %[[l2]]
 // CHECK: }
 
@@ -75,6 +78,7 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 // CHECK: %[[subview:.*]] = memref.subview %[[alloc]]{{.*}} : memref<10x20xf32> to memref<2x5xf32, strided<[20, 1], offset: ?>>
 // CHECK: %[[casted:.*]] = memref.cast %[[subview]]
 // CHECK: memref.copy %[[casted]], %[[r]]
+// CHECK: memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT-LABEL: func @callee(
 // CHECK-NO-LAYOUT-SAME: %{{.*}}: index,
@@ -86,7 +90,9 @@ func.func @main(%t: tensor<5xf32>) -> (f32, f32) {
 // value and function signature.
 // CHECK-NO-LAYOUT: %[[alloc2:.*]] = memref.alloc() : memref<2x5xf32>
 // CHECK-NO-LAYOUT: memref.copy %[[subview]], %[[alloc2]]
+// CHECK-NO-LAYOUT: memref.dealloc %[[alloc]]
 // CHECK-NO-LAYOUT: memref.copy %[[alloc2]], %[[r]]
+// CHECK-NO-LAYOUT: memref.dealloc %[[alloc2]]
 
 // CHECK-BASELINE-LABEL: func @callee(
 // CHECK-BASELINE-SAME: %{{.*}}: index) -> memref<2x5xf32, strided<[20, 1], offset: ?>> {
@@ -104,11 +110,13 @@ func.func @callee(%idx: index) -> tensor<2x5xf32> {
 // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<2x5xf32> to memref<2x5xf32, strided<[?, ?], offset: ?>>
 // CHECK: call @callee(%{{.*}}, %[[casted]])
 // CHECK: memref.load %[[casted]]
+// CHECK: memref.dealloc %[[alloc]]
 
 // CHECK-NO-LAYOUT: func @main(
 // CHECK-NO-LAYOUT: %[[alloc:.*]] = memref.alloc() : memref<2x5xf32>
 // CHECK-NO-LAYOUT: call @callee(%{{.*}}, %[[alloc]])
 // CHECK-NO-LAYOUT: memref.load %[[alloc]]
+// CHECK-NO-LAYOUT: memref.dealloc
 
 // CHECK-BASELINE: func @main(
 // CHECK-BASELINE: %[[call:.*]] = call @callee
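The CHECK-NO-LAYOUT deallocations added above correspond roughly to a callee of the following shape after -buffer-results-to-out-params and -buffer-deallocation. This is a hand-written sketch assuming identity layout maps, a fixed-size buffer, and a hypothetical stored constant; it is not output taken from the commit.

// Sketch only: the result buffer is returned through the out-parameter %out,
// and the local working copy is freed before returning.
func.func @callee(%arg0: memref<5xf32>, %out: memref<5xf32>) {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 1.0 : f32
  %alloc = memref.alloc() : memref<5xf32>
  memref.copy %arg0, %alloc : memref<5xf32> to memref<5xf32>
  memref.store %cst, %alloc[%c0] : memref<5xf32>
  memref.copy %alloc, %out : memref<5xf32> to memref<5xf32>
  memref.dealloc %alloc : memref<5xf32>
  return
}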
