Commit bb82092
[mlir][tensor] Make getMixedPadImpl return static values when possible. (llvm#85016)
If low and high are constants (i.e., constant SSA values, not attributes), users still prefer attributes, so the remaining dynamic entries are now wrapped in getAsOpFoldResult, which folds a value defined by a constant op back into its attribute. Otherwise, there can be failures in type inference. One such failure was introduced by llvm@60e562d; see the drop_known_unit_constant_low_high test for more details.
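For illustration (a minimal sketch mirroring the drop_known_unit_constant_low_high test added below; %arg0 and the other SSA names are illustrative), consider a pad whose amounts are all constant index values rather than attributes:

    // All pad amounts are constant SSA values, not attributes.
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %cst = arith.constant 0.000000e+00 : f32
    %padded = tensor.pad %arg0 low[%c0, %c1, %c0] high[%c0, %c0, %c0] {
    ^bb0(%i: index, %j: index, %k: index):
      tensor.yield %cst : f32
    } : tensor<1x383x128xf32> to tensor<1x384x128xf32>

    // Before this patch, getMixedLowPad() returned the Values %c0, %c1, %c0;
    // it now folds them to the attributes 0, 1, 0, so patterns that rebuild
    // the op from its mixed pad values infer the correct static result type.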
1 parent 9a3000c

4 files changed, +23 -5 lines

mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td (1 addition & 1 deletion)

@@ -1364,7 +1364,7 @@ def Tensor_PadOp : Tensor_Op<"pad", [
     unsigned count = staticAttrs.size();
     for (unsigned idx = 0; idx < count; ++idx) {
       if (ShapedType::isDynamic(staticAttrs[idx]))
-        res.push_back(values[numDynamic++]);
+        res.push_back(getAsOpFoldResult(values[numDynamic++]));
       else
         res.push_back(builder.getI64IntegerAttr(staticAttrs[idx]));
     }

mlir/test/Conversion/TensorToLinalg/tensor-ops-to-linalg.mlir (1 addition & 2 deletions)

@@ -22,7 +22,6 @@ func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> t
 // CHECK-LABEL: func @generalize_pad_tensor_dynamic_shape(
 // CHECK-SAME: %[[IN:.*]]: tensor<4x?x2x?xf32>,
 // CHECK-SAME: %[[OFFSET:.*]]: index) -> tensor<4x?x?x?xf32> {
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK: %[[DIM1:.*]] = tensor.dim %[[IN]], %[[C1]] : tensor<4x?x2x?xf32>
@@ -33,7 +32,7 @@ func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> t
 // CHECK: %[[OUT_DIM3:.*]] = arith.addi %[[DIM3]], %[[OFFSET]] : index
 // CHECK: %[[INIT:.*]] = tensor.empty(%[[DIM1]], %[[OUT_DIM2]], %[[OUT_DIM3]]) : tensor<4x?x?x?xf32>
 // CHECK: %[[FILL:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<4x?x?x?xf32>) -> tensor<4x?x?x?xf32>
-// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]]{{\[}}%[[C0]], %[[C0]], %[[OFFSET]], %[[C0]]] [4, %[[DIM1]], 2, %[[DIM3]]] [1, 1, 1, 1] : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>
+// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]][0, 0, %[[OFFSET]], 0] [4, %[[DIM1]], 2, %[[DIM3]]] [1, 1, 1, 1] : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>
 // CHECK: return %[[PADDED]] : tensor<4x?x?x?xf32>
 // CHECK: }
 func.func @generalize_pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: index) -> tensor<4x?x?x?xf32> {

mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir (20 additions & 0 deletions)

@@ -1033,3 +1033,23 @@ func.func @do_not_drop_non_constant_padding(%arg0: tensor<1x1x3x1x1xf32>, %pad:
 // CHECK-SLICES-LABEL: func @do_not_drop_non_constant_padding
 // CHECK-SLICES: tensor.pad %{{.*}} low[0, 1, 0, %c0, 0] high[0, 0, 0, %c0, 2]
 // CHECK-SLICES: } : tensor<1x1x3x1x1xf32> to tensor<1x2x3x1x3xf32>
+
+// -----
+
+func.func @drop_known_unit_constant_low_high(%arg0: tensor<1x383x128xf32>) -> tensor<1x384x128xf32> {
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %cst = arith.constant 0.000000e+00 : f32
+  %padded = tensor.pad %arg0 low[%c0, %c1, %c0] high[%c0, %c0, %c0] {
+  ^bb0(%arg1: index, %arg2: index, %arg3: index):
+    tensor.yield %cst : f32
+  } : tensor<1x383x128xf32> to tensor<1x384x128xf32>
+  return %padded : tensor<1x384x128xf32>
+}
+// CHECK-LABEL: func @drop_known_unit_constant_low_high
+// CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape
+// CHECK-SAME: {{\[}}[0, 1], [2]] : tensor<1x383x128xf32> into tensor<383x128xf32>
+// CHECK: %[[PADDED:.+]] = tensor.pad %[[COLLAPSE]] low[1, 0] high[0, 0]
+// CHECK: } : tensor<383x128xf32> to tensor<384x128xf32>
+// CHECK: tensor.expand_shape %[[PADDED]]
+// CHECK-SAME: {{\[}}[0, 1], [2]] : tensor<384x128xf32> into tensor<1x384x128xf32>
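Reconstructed from the CHECK lines above, the pass output for this test is roughly the following (a sketch; SSA names are illustrative and anything the CHECKs elide is assumed):

    %collapsed = tensor.collapse_shape %arg0 [[0, 1], [2]]
        : tensor<1x383x128xf32> into tensor<383x128xf32>
    %padded = tensor.pad %collapsed low[1, 0] high[0, 0] {
    ^bb0(%i: index, %j: index):
      tensor.yield %cst : f32
    } : tensor<383x128xf32> to tensor<384x128xf32>
    %expanded = tensor.expand_shape %padded [[0, 1], [2]]
        : tensor<384x128xf32> into tensor<1x384x128xf32>

The unit leading dimension is collapsed away, the now-static pad amounts low[1, 0] apply to the remaining dimensions, and the result is expanded back to the original rank.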

mlir/test/Dialect/Linalg/generalize-pad-tensor.mlir (1 addition & 2 deletions)

@@ -19,7 +19,6 @@ func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> t
 // CHECK-LABEL: func @generalize_pad_tensor_dynamic_shape(
 // CHECK-SAME: %[[IN:.*]]: tensor<4x?x2x?xf32>,
 // CHECK-SAME: %[[OFFSET:.*]]: index) -> tensor<4x?x?x?xf32> {
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -32,7 +31,7 @@ func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> t
 // CHECK: %[[FILL:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<4x?x?x?xf32>) -> tensor<4x?x?x?xf32>
 // CHECK: %[[DIM1_1:.*]] = tensor.dim %[[IN]], %[[C1]] : tensor<4x?x2x?xf32>
 // CHECK: %[[DIM3_1:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<4x?x2x?xf32>
-// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]]{{\[}}%[[C0]], %[[C0]], %[[OFFSET]], %[[C0]]] [4, %[[DIM1_1]], 2, %[[DIM3_1]]] [1, 1, 1, 1] : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>
+// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]][0, 0, %[[OFFSET]], 0] [4, %[[DIM1_1]], 2, %[[DIM3_1]]] [1, 1, 1, 1] : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>
 // CHECK: return %[[PADDED]] : tensor<4x?x?x?xf32>
 // CHECK: }
 func.func @generalize_pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: index) -> tensor<4x?x?x?xf32> {
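The edits in this file and in tensor-ops-to-linalg.mlir follow from the same folding: the zero offsets handed to tensor.insert_slice are now attributes, so the generalized IR prints them inline and no %c0 constant is materialized. A before/after sketch (operand names illustrative):

    // Before: offsets were SSA constants, so a %c0 op had to exist.
    %r = tensor.insert_slice %in into %fill[%c0, %c0, %offset, %c0] [4, %d1, 2, %d3] [1, 1, 1, 1]
        : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>

    // After: the zero offsets are static, and %c0 is dead.
    %r = tensor.insert_slice %in into %fill[0, 0, %offset, 0] [4, %d1, 2, %d3] [1, 1, 1, 1]
        : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>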
