Skip to content

Commit db81e88

Browse files
author
Stephan Herhut
committed
[mlir][memref] Mark memref.buffer_cast as NoSideEffect
This brings it in line with the builtin unrealized_conversion_cast, which memref.buffer_cast is a specialized version of. Differential Revision: https://reviews.llvm.org/D102608
1 parent 05de4b4 commit db81e88

File tree

2 files changed

+17
-15
lines changed

2 files changed

+17
-15
lines changed

mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ def MemRef_AllocaOp : AllocLikeOp<"alloca", AutomaticAllocationScopeResource> {
203203
//===----------------------------------------------------------------------===//
204204

205205
def MemRef_BufferCastOp : MemRef_Op<"buffer_cast",
206-
[SameOperandsAndResultShape, SameOperandsAndResultElementType,
206+
[SameOperandsAndResultShape, SameOperandsAndResultElementType, NoSideEffect,
207207
TypesMatchWith<"type of 'tensor' is the tensor equivalent of 'memref'",
208208
"memref", "tensor",
209209
"getTensorTypeFromMemRefType($_self)">]> {
@@ -218,6 +218,10 @@ def MemRef_BufferCastOp : MemRef_Op<"buffer_cast",
218218

219219
Note, that mutating the result of the buffer cast operation leads to
220220
undefined behavior.
221+
222+
This operation is a specialized variant of the built-in
223+
unrealized_conversion_cast and is intended for use in the context of
224+
gradual bufferization.
221225
}];
222226

223227
let arguments = (ins AnyTensor:$tensor);

mlir/test/Dialect/Linalg/bufferize.mlir

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -172,17 +172,16 @@ func @bufferize_subtensor(%t : tensor<?x?xf32>) -> (tensor<2x3xf32>, tensor<2x?x
172172
// CHECK: %[[IDX:.*]] = call @make_index() : () -> index
173173
%i0 = call @make_index() : () -> index
174174

175-
// CHECK: %[[M0:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
175+
// CHECK: %[[M:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
176176
// CHECK-NEXT: %[[A0:.*]] = memref.alloc() : memref<2x3xf32>
177-
// CHECK-NEXT: %[[SM0:.*]] = memref.subview %[[M0]][0, 0] [2, 3] [1, 1]
177+
// CHECK-NEXT: %[[SM0:.*]] = memref.subview %[[M]][0, 0] [2, 3] [1, 1]
178178
// CHECK-SAME: memref<?x?xf32> to memref<2x3xf32, #[[$MAP0]]>
179179
// CHECK-NEXT: linalg.copy(%[[SM0]], %[[A0]]) : memref<2x3xf32, #[[$MAP0]]>, memref<2x3xf32>
180180
// CHECK-NEXT: %[[RT0:.*]] = memref.tensor_load %[[A0]] : memref<2x3xf32>
181181
%st0 = subtensor %t[0, 0][2, 3][1, 1] : tensor<?x?xf32> to tensor<2x3xf32>
182182

183-
// CHECK: %[[M1:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
184183
// CHECK-NEXT: %[[A1:.*]] = memref.alloc(%[[IDX]]) : memref<2x?xf32>
185-
// CHECK-NEXT: %[[SM1:.*]] = memref.subview %[[M1]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
184+
// CHECK-NEXT: %[[SM1:.*]] = memref.subview %[[M]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
186185
// CHECK-SAME: memref<?x?xf32> to memref<2x?xf32, #[[$MAP1]]>
187186
// CHECK-NEXT: linalg.copy(%[[SM1]], %[[A1]]) : memref<2x?xf32, #[[$MAP1]]>, memref<2x?xf32>
188187
// CHECK-NEXT: %[[RT1:.*]] = memref.tensor_load %[[A1]] : memref<2x?xf32>
@@ -213,26 +212,25 @@ func @bufferize_subtensor_insert(%t : tensor<?x?xf32>, %st0 : tensor<2x3xf32>, %
213212
// CHECK: %[[IDX:.*]] = call @make_index() : () -> index
214213

215214

216-
// CHECK-DAG: %[[M0:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
215+
// CHECK-DAG: %[[M:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
217216
// CHECK-DAG: %[[SM0:.*]] = memref.buffer_cast %[[ST0]] : memref<2x3xf32>
218217
// CHECK-NEXT: %[[DIM0:.*]] = memref.dim %[[T]], %[[C0]] : tensor<?x?xf32>
219218
// CHECK-NEXT: %[[DIM1:.*]] = memref.dim %[[T]], %[[C1]] : tensor<?x?xf32>
220-
// CHECK-NEXT: %[[M0_COPY:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
221-
// CHECK-NEXT: linalg.copy(%[[M0]], %[[M0_COPY]]) : memref<?x?xf32>, memref<?x?xf32>
222-
// CHECK-NEXT: %[[SUBVIEW0:.*]] = memref.subview %[[M0_COPY]][0, 0] [2, 3] [1, 1]
219+
// CHECK-NEXT: %[[M_COPY0:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
220+
// CHECK-NEXT: linalg.copy(%[[M]], %[[M_COPY0]]) : memref<?x?xf32>, memref<?x?xf32>
221+
// CHECK-NEXT: %[[SUBVIEW0:.*]] = memref.subview %[[M_COPY0]][0, 0] [2, 3] [1, 1]
223222
// CHECK-SAME: memref<?x?xf32> to memref<2x3xf32, #[[$MAP0]]>
224223
// CHECK-NEXT: linalg.copy(%[[SM0]], %[[SUBVIEW0]]) : memref<2x3xf32>, memref<2x3xf32, #[[$MAP0]]>
225-
// CHECK-NEXT: %[[RT0:.*]] = memref.tensor_load %[[M0_COPY]] : memref<?x?xf32>
224+
// CHECK-NEXT: %[[RT0:.*]] = memref.tensor_load %[[M_COPY0]] : memref<?x?xf32>
226225
%t0 = subtensor_insert %st0 into %t[0, 0][2, 3][1, 1] : tensor<2x3xf32> into tensor<?x?xf32>
227226

228-
// CHECK-DAG: %[[M1:.*]] = memref.buffer_cast %[[T]] : memref<?x?xf32>
229227
// CHECK-DAG: %[[SM1:.*]] = memref.buffer_cast %[[ST1]] : memref<2x?xf32>
230-
// CHECK-NEXT: %[[M1_COPY:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
231-
// CHECK-NEXT: linalg.copy(%[[M1]], %[[M1_COPY]]) : memref<?x?xf32>, memref<?x?xf32>
232-
// CHECK-NEXT: %[[SUBVIEW1:.*]] = memref.subview %[[M1_COPY]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
228+
// CHECK-NEXT: %[[M_COPY1:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) : memref<?x?xf32>
229+
// CHECK-NEXT: linalg.copy(%[[M]], %[[M_COPY1]]) : memref<?x?xf32>, memref<?x?xf32>
230+
// CHECK-NEXT: %[[SUBVIEW1:.*]] = memref.subview %[[M_COPY1]][0, %[[IDX]]] [2, %[[IDX]]] [1, 2]
233231
// CHECK-SAME: memref<?x?xf32> to memref<2x?xf32, #[[$MAP1]]>
234232
// CHECK-NEXT: linalg.copy(%[[SM1]], %[[SUBVIEW1]]) : memref<2x?xf32>, memref<2x?xf32, #[[$MAP1]]>
235-
// CHECK-NEXT: %[[RT1:.*]] = memref.tensor_load %[[M1_COPY]] : memref<?x?xf32>
233+
// CHECK-NEXT: %[[RT1:.*]] = memref.tensor_load %[[M_COPY1]] : memref<?x?xf32>
236234
%t1 = subtensor_insert %st1 into %t[0, %i0][2, %i0][1, 2] : tensor<2x?xf32> into tensor<?x?xf32>
237235

238236
// CHECK: return %[[RT0]], %[[RT1]]

0 commit comments

Comments
 (0)