
Commit 36f68a4

Revert "[mlir][tensor] Add TilingInterface support for fusing tensor.pad (llvm#105892)"
This reverts commit 91e57c6.
1 parent 385a31d · commit 36f68a4

2 files changed: 0 additions, 58 deletions


mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp (0 additions, 17 deletions)
@@ -67,23 +67,6 @@ struct PadOpTiling : public TilingInterface::ExternalModel<PadOpTiling, PadOp> {
     resultSizes.assign(sizes.begin(), sizes.end());
     return success();
   }
-
-  LogicalResult getIterationDomainTileFromResultTile(
-      Operation *op, OpBuilder &b, unsigned resultNumber,
-      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
-      SmallVectorImpl<OpFoldResult> &iterDomainOffsets,
-      SmallVectorImpl<OpFoldResult> &iterDomainSizes) const {
-    iterDomainOffsets.assign(offsets.begin(), offsets.end());
-    iterDomainSizes.assign(sizes.begin(), sizes.end());
-    return success();
-  }
-
-  FailureOr<TilingResult>
-  generateResultTileValue(Operation *op, OpBuilder &b, unsigned resultNumber,
-                          ArrayRef<OpFoldResult> offsets,
-                          ArrayRef<OpFoldResult> sizes) const {
-    return getTiledImplementation(op, b, offsets, sizes);
-  }
 };
 
 template <typename OpTy>
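Both deleted hooks are trivial for tensor.pad because the op's iteration domain coincides with its result space: getIterationDomainTileFromResultTile forwards the result-tile offsets and sizes unchanged, and generateResultTileValue delegates to getTiledImplementation. As a rough caller-side sketch of why a fusion driver wants these hooks (the helper below is hypothetical; TilingInterface and generateResultTileValue are the real MLIR interface and method):

// Hypothetical sketch: a fusion driver asks a producer that implements
// TilingInterface (e.g. tensor.pad, while this commit's PadOpTiling model
// was in tree) to materialize just the tile of result #resultNumber that a
// consumer reads.
#include "mlir/Interfaces/TilingInterface.h"

using namespace mlir;

static FailureOr<TilingResult>
materializeProducerTile(OpBuilder &b, TilingInterface producer,
                        unsigned resultNumber, ArrayRef<OpFoldResult> offsets,
                        ArrayRef<OpFoldResult> sizes) {
  // For tensor.pad the reverted generateResultTileValue simply forwarded to
  // getTiledImplementation, producing a tiled pad guarded by an scf.if /
  // tensor.generate swap, as the test below checks.
  return producer.generateResultTileValue(b, resultNumber, offsets, sizes);
}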

mlir/test/Dialect/Tensor/tiling.mlir (0 additions, 41 deletions)
@@ -116,47 +116,6 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
-// CHECK-LABEL: func @fuse_static_pad_tensor_3_4(
-// CHECK-SAME: %[[IN:.*]]: tensor<7x9xf32>
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
-// CHECK-DAG: %[[C15:.*]] = arith.constant 15 : index
-// CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
-// CHECK: %[[RESULT:.*]] = scf.for {{.*}} = %[[C0]] to %[[C15]] step %[[C2]]
-// CHECK: scf.for {{.*}} = %[[C0]] to %[[C16]] step %[[C3]] iter_args(%[[INNER_OUT:.*]] =
-// CHECK: %[[SWAP_RESULT:.*]] = scf.if
-// CHECK: tensor.generate
-// CHECK: else
-// CHECK: %[[SLICE:.*]] = tensor.extract_slice %[[IN]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
-// CHECK: %[[PAD:.*]] = tensor.pad %[[SLICE]]
-// CHECK: %[[COPY:.*]] = linalg.copy ins(%[[SWAP_RESULT:.*]]
-// CHECK: tensor.insert_slice %[[COPY]] into %[[INNER_OUT]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
-// CHECK: return %[[RESULT]]
-
-func.func @fuse_static_pad_tensor_3_4(%input_tensor: tensor<7x9xf32>,
-                                      %pad_value: f32) -> tensor<15x16xf32> {
-  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
-    ^bb0(%arg1: index, %arg2: index):
-      tensor.yield %pad_value : f32
-    } : tensor<7x9xf32> to tensor<15x16xf32>
-  %empty = tensor.empty() : tensor<15x16xf32>
-  %1 = linalg.copy ins(%0 : tensor<15x16xf32>) outs(%empty : tensor<15x16xf32>) -> tensor<15x16xf32>
-  return %1 : tensor<15x16xf32>
-}
-
-module attributes {transform.with_named_sequence} {
-  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
-    %copy = transform.structured.match ops{["linalg.copy"]} in %arg1
-      : (!transform.any_op) -> !transform.any_op
-    %a, %b, %c = transform.structured.fuse %copy [2, 3]
-      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-    transform.yield
-  }
-}
-
-// -----
-
 // CHECK-LABEL: func @static_pad_tensor_0_3(
 // CHECK-SAME: %[[IN:.*]]: tensor<7x9xf32>
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
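The removed test drove this path through the transform dialect: transform.structured.fuse tiles the linalg.copy consumer with tile sizes [2, 3] and fuses the tensor.pad producer into the resulting scf.for nest. A minimal C++ sketch of the equivalent invocation, assuming the tile-and-fuse driver names from mlir/Dialect/SCF/Transforms/TileUsingInterface.h (the wrapper function itself is hypothetical):

// Hypothetical sketch: tile a consumer with sizes {2, 3} and greedily fuse
// tileable producers such as tensor.pad into the generated loop nest.
#include "mlir/Dialect/SCF/Transforms/TileUsingInterface.h"

using namespace mlir;

static LogicalResult tileAndFuseCopy(RewriterBase &rewriter,
                                     TilingInterface copyOp) {
  scf::SCFTileAndFuseOptions options;
  SmallVector<OpFoldResult> tileSizes = {rewriter.getIndexAttr(2),
                                         rewriter.getIndexAttr(3)};
  options.tilingOptions.setTileSizes(tileSizes);
  // Fusing the pad producer relies on hooks like the ones this commit
  // reverts; without them the pad cannot be pulled into the loop nest.
  FailureOr<scf::SCFTileAndFuseResult> result =
      scf::tileConsumerAndFuseProducersUsingSCF(rewriter, copyOp, options);
  return failed(result) ? failure() : success();
}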
