
Commit c6b96ae

Author: Tobias Gysi (committed)
[mlir][linalg] Remove IndexedGenericOp support from LinalgBufferize...

after introducing the IndexedGenericOp to GenericOp canonicalization
(https://reviews.llvm.org/D101612).

Differential Revision: https://reviews.llvm.org/D102308

1 parent 7d0a81c commit c6b96ae
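For context, a hedged sketch of the canonicalization this commit relies on (introduced in D101612): the index block arguments of a linalg.indexed_generic are replaced by linalg.index ops, leaving a plain linalg.generic that the existing bufferization path already handles. The op syntax mirrors the test removed below; the exact output of the pattern is an assumption, not taken from this commit.

// Before: an indexed_generic whose body uses the index argument %i.
#map0 = affine_map<(d0) -> (d0)>
func @add_index(%arg0: tensor<4xi32>) -> tensor<4xi32> {
  %0 = linalg.indexed_generic {
    indexing_maps = [#map0, #map0],
    iterator_types = ["parallel"]
  } ins(%arg0 : tensor<4xi32>) outs(%arg0 : tensor<4xi32>) {
  ^bb0(%i: index, %in: i32, %out: i32):
    %i_i32 = index_cast %i : index to i32
    %sum = addi %in, %i_i32 : i32
    linalg.yield %sum : i32
  } -> tensor<4xi32>
  return %0 : tensor<4xi32>
}

// After canonicalization (sketch): the index block argument is gone;
// the body recovers the iteration index with linalg.index instead.
func @add_index(%arg0: tensor<4xi32>) -> tensor<4xi32> {
  %0 = linalg.generic {
    indexing_maps = [#map0, #map0],
    iterator_types = ["parallel"]
  } ins(%arg0 : tensor<4xi32>) outs(%arg0 : tensor<4xi32>) {
  ^bb0(%in: i32, %out: i32):
    %i = linalg.index 0 : index
    %i_i32 = index_cast %i : index to i32
    %sum = addi %in, %i_i32 : i32
    linalg.yield %sum : i32
  } -> tensor<4xi32>
  return %0 : tensor<4xi32>
}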

2 files changed (+9, -40 lines)


mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp (9 additions & 14 deletions)

@@ -71,18 +71,17 @@ allocateBuffersForResults(Location loc, LinalgOp linalgOp, ValueRange outputs,
   return success();
 }
 
-/// Specialization for `linalg::GenericOp` and `linalg::IndexedGenericOp`.
+/// Specialization for `linalg::GenericOp`.
 /// A pattern to convert Generic Linalg operations which work on tensors to
 /// use buffers. BufferPlacement pass should be later used to move
 /// Alloc operations to the correct positions and insert the missing Dealloc
 /// operations in the correct places.
-template <typename GenericOpTy>
 static void
 finalizeBufferAllocationForGenericOp(ConversionPatternRewriter &rewriter,
-                                     GenericOpTy genericOp, ValueRange inputs,
+                                     GenericOp genericOp, ValueRange inputs,
                                      ValueRange outputs) {
   // Generate a new linalg operation that works on buffers.
-  auto newGenericOp = rewriter.create<GenericOpTy>(
+  auto newGenericOp = rewriter.create<GenericOp>(
       genericOp.getLoc(),
       /*resultTensorTypes=*/llvm::None,
       /*inputs=*/inputs,

@@ -116,7 +115,6 @@ static void finalizeBufferAllocation(ConversionPatternRewriter &rewriter,
                                      linalg::LinalgOp linalgOp,
                                      ValueRange inputs, ValueRange outputs) {
   assert(!isa<linalg::GenericOp>(linalgOp.getOperation()));
-  assert(!isa<linalg::IndexedGenericOp>(linalgOp.getOperation()));
   SmallVector<Value, 8> newOperands = inputs;
   newOperands.append(outputs.begin(), outputs.end());
   auto otherOperands = linalgOp.getAssumedNonShapedOperands();

@@ -195,6 +193,10 @@ class BufferizeAnyLinalgOp : public OpInterfaceConversionPattern<LinalgOp> {
   LogicalResult
   matchAndRewrite(LinalgOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const final {
+    // Canonicalize indexed generic operations before bufferization.
+    if (isa<IndexedGenericOp>(op))
+      return failure();
+
     // GenericOpAdaptor below expects an `operand_segment_sizes` attribute.
     if (!op->hasAttr("operand_segment_sizes"))
       return failure();

@@ -215,15 +217,8 @@ class BufferizeAnyLinalgOp : public OpInterfaceConversionPattern<LinalgOp> {
 
     // Delegate to the linalg generic pattern.
    if (auto genericOp = dyn_cast<linalg::GenericOp>(*op)) {
-      finalizeBufferAllocationForGenericOp<GenericOp>(
-          rewriter, genericOp, adaptor.inputs(), newOutputBuffers);
-      return success();
-    }
-
-    // Delegate to the linalg indexed generic pattern.
-    if (auto genericOp = dyn_cast<linalg::IndexedGenericOp>(*op)) {
-      finalizeBufferAllocationForGenericOp<IndexedGenericOp>(
-          rewriter, genericOp, adaptor.inputs(), newOutputBuffers);
+      finalizeBufferAllocationForGenericOp(rewriter, genericOp,
+                                           adaptor.inputs(), newOutputBuffers);
       return success();
     }

mlir/test/Dialect/Linalg/bufferize.mlir (0 additions & 26 deletions)

@@ -91,32 +91,6 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
-// CHECK-LABEL: func @multiple_results_indexed
-// CHECK: %[[RESULT0:.*]] = memref.alloc() : memref<4xi32>
-// CHECK: %[[RESULT1:.*]] = memref.alloc() : memref<4xi32>
-// CHECK: linalg.generic
-// CHECK-SAME: ins(%{{.*}} : memref<4xi32>)
-// CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref<4xi32>, memref<4xi32>)
-// CHECK-NEXT: ^bb0(%{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32):
-func @multiple_results_indexed(%arg0: tensor<4xi32>)
-    -> (tensor<4xi32>, tensor<4xi32>) {
-  %0, %1 = linalg.indexed_generic {
-    indexing_maps = [#map0, #map0, #map0],
-    iterator_types = ["parallel"]
-  } ins(%arg0 : tensor<4xi32>)
-    outs (%arg0, %arg0 : tensor<4xi32>, tensor<4xi32>) {
-    ^bb0(%i: index, %gen_arg1: i32, %out1: i32, %out2: i32):
-      %i_i32 = index_cast %i : index to i32
-      %tmp1 = addi %gen_arg1, %i_i32 : i32
-      linalg.yield %tmp1, %tmp1 : i32, i32
-  } -> tensor<4xi32>, tensor<4xi32>
-  return %0, %1 : tensor<4xi32>, tensor<4xi32>
-}
-
-// -----
-
 #map_2d = affine_map<(d0, d1) -> (d0, d1)>
 
 // Check that the allocs properly consider the different shapes of the output
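Coverage for the deleted test is preserved rather than lost: an indexed_generic now canonicalizes to a generic before this pattern fires, and the generic path is exercised by the remaining tests. As a hedged sketch (names illustrative, mirroring the removed CHECK lines), the canonicalized op would bufferize to roughly:

%in = ... : memref<4xi32>   // bufferized input (illustrative placeholder)
%result0 = memref.alloc() : memref<4xi32>
%result1 = memref.alloc() : memref<4xi32>
linalg.generic {
  indexing_maps = [#map0, #map0, #map0],
  iterator_types = ["parallel"]
} ins(%in : memref<4xi32>)
  outs(%result0, %result1 : memref<4xi32>, memref<4xi32>) {
^bb0(%gen_arg1: i32, %out1: i32, %out2: i32):
  // The former index block argument is recovered via linalg.index.
  %i = linalg.index 0 : index
  %i_i32 = index_cast %i : index to i32
  %tmp1 = addi %gen_arg1, %i_i32 : i32
  linalg.yield %tmp1, %tmp1 : i32, i32
}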
