
Commit 2a25200

[mlir][tensor] Restrict the verifier for tensor.pack/tensor.unpack (#113108)
Restricts the verifier for the tensor.pack and tensor.unpack ops so that the following is no longer allowed:

```mlir
%c8 = arith.constant 8 : index
%0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
```

Specifically, in line with other Tensor ops, require:

* a dynamic dimension for each dynamic (SSA value) tile size,
* a static dimension for each static (attribute) tile size.

In the example above, a static dimension (8) is mixed with a dynamic tile size (%c8).

Note that this mostly deletes existing code; the change simplifies the logic in the verifier. For more context:

* https://discourse.llvm.org/t/tensor-ops-with-dynamic-sizes-which-behaviour-is-more-correct
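For illustration, both pairings below remain valid under the new rules: a static tile size (attribute) with a matching static dimension, and a dynamic tile size (SSA value) with a dynamic dimension. A minimal sketch of the rule (%input, %output_static, and %output_dyn are illustrative names, not taken from the patch):

```mlir
// Static tile sizes with matching static dimensions: accepted.
%0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 8]
    into %output_static : tensor<?x?xf32> -> tensor<?x?x8x8xf32>

// Dynamic tile size %c8 paired with a dynamic dimension: accepted.
%c8 = arith.constant 8 : index
%1 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8]
    into %output_dyn : tensor<?x?xf32> -> tensor<?x?x8x?xf32>
```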
1 parent 1cbc015 · commit 2a25200

4 files changed: +57 additions, −29 deletions

mlir/lib/Dialect/Tensor/IR/TensorOps.cpp

Lines changed: 6 additions & 14 deletions
```diff
@@ -3865,22 +3865,14 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
           llvm::zip(packedType.getShape().take_back(mixedTiles.size()),
                     mixedTiles),
           [](std::tuple<int64_t, OpFoldResult> it) {
-            std::optional<int64_t> constTileSize =
-                getConstantIntValue(std::get<1>(it));
             int64_t shape = std::get<0>(it);
-            if (!constTileSize) {
-              // If specified tile size is dynamic, output shape should
-              // be dynamic too.
-              return ShapedType::isDynamic(shape);
+            if (Attribute attr =
+                    llvm::dyn_cast_if_present<Attribute>(std::get<1>(it))) {
+              IntegerAttr intAttr = dyn_cast_or_null<IntegerAttr>(attr);
+              int64_t staticTileSize = intAttr.getValue().getSExtValue();
+              return shape == staticTileSize;
             }
-            if (ShapedType::isDynamic(shape)) {
-              // For the shape being dynamic when tile size is
-              // specified, return true. In canonical form a constant
-              // tile size should lead to constant shape of the tiled
-              // dimension, but not needed for verification.
-              return true;
-            }
-            return shape == constTileSize.value();
+            return ShapedType::isDynamic(shape);
           })) {
     return op->emitError("mismatch in inner tile sizes specified and shaped of "
                          "tiled dimension in the packed type");
```

mlir/test/Dialect/Linalg/transform-lower-pack.mlir

Lines changed: 4 additions & 4 deletions
```diff
@@ -586,7 +586,7 @@ module attributes {transform.with_named_sequence} {
 
 // Check that we can lower unpack "as unpad" with dynamic dims.
 // CHECK-LABEL: func.func @unpack_as_pad_dynamic(
-// CHECK-SAME:    %[[ARG0:.*]]: tensor<1x1x1x1x?x?x?x?xf32>, %[[ARG1:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME:    %[[ARG0:.*]]: tensor<1x1x1x1x136x64x16x16xf32>, %[[ARG1:.*]]: tensor<?x?x?x?xf32>
 // CHECK-DAG:       %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG:       %[[C2:.*]] = arith.constant 2 : index
@@ -602,10 +602,10 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME:      [1, 1, 1, 1, %[[DIM0]], %[[DIM1]], %[[DIM2]], %[[DIM3]]]
 //  strides multiplers.
 // CHECK-SAME:      [1, 1, 1, 1, 1, 1, 1, 1]
-// CHECK-SAME:      : tensor<1x1x1x1x?x?x?x?xf32> to tensor<?x?x?x?xf32>
-func.func @unpack_as_pad_dynamic(%arg0: tensor<1x1x1x1x?x?x?x?xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+// CHECK-SAME:      : tensor<1x1x1x1x136x64x16x16xf32> to tensor<?x?x?x?xf32>
+func.func @unpack_as_pad_dynamic(%arg0: tensor<1x1x1x1x136x64x16x16xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
   %pack = tensor.unpack %arg0 inner_dims_pos = [0, 1, 2, 3] inner_tiles = [136, 64, 16, 16] into %arg1
-    : tensor<1x1x1x1x?x?x?x?xf32> -> tensor<?x?x?x?xf32>
+    : tensor<1x1x1x1x136x64x16x16xf32> -> tensor<?x?x?x?xf32>
   return %pack : tensor<?x?x?x?xf32>
 }
```

mlir/test/Dialect/Tensor/fold-empty-op.mlir

Lines changed: 9 additions & 11 deletions
```diff
@@ -77,20 +77,20 @@ func.func @pack_empty(%arg0: tensor<8x8x32x32xf32>) -> tensor<8x8x32x32xf32> {
 // CHECK-NOT:   tensor.pack
 // CHECK:       return %[[T]] : tensor<8x8x32x32xf32>
 
-func.func @pack_empty_dynamic(%arg0: tensor<?x?x?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?x?x?xf32> {
+func.func @pack_empty_dynamic(%arg0: tensor<?x?x32x32xf32>, %dim0: index, %dim1: index) -> tensor<?x?x32x32xf32> {
   %empty_unpacked = tensor.empty(%dim0, %dim1) : tensor<?x?xf32>
   %packed = tensor.pack %empty_unpacked
     inner_dims_pos = [0, 1] inner_tiles = [32, 32]
-    into %arg0 : tensor<?x?xf32> -> tensor<?x?x?x?xf32>
-  return %packed : tensor<?x?x?x?xf32>
+    into %arg0 : tensor<?x?xf32> -> tensor<?x?x32x32xf32>
+  return %packed : tensor<?x?x32x32xf32>
 }
 
 // CHECK-LABEL: func.func @pack_empty_dynamic(
-// CHECK-SAME:   %[[T:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME:   %[[T:.+]]: tensor<?x?x32x32xf32>,
 // CHECK-SAME:   %[[DIM0:[a-zA-Z0-9_]+]]: index,
 // CHECK-SAME:   %[[DIM1:[a-zA-Z0-9_]+]]: index
 // CHECK-NOT:   tensor.pack
-// CHECK:       return %[[T]] : tensor<?x?x?x?xf32>
+// CHECK:       return %[[T]] : tensor<?x?x32x32xf32>
 
 func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
   %empty_packed = tensor.empty() : tensor<8x8x32x32xf32>
@@ -105,20 +105,18 @@ func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
 // CHECK-NOT:   tensor.unpack
 // CHECK:       return %[[T]] : tensor<256x256xf32>
 
-func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index, %dim2: index, %dim3: index) -> tensor<?x?xf32> {
-  %empty_packed = tensor.empty(%dim0, %dim1, %dim2, %dim3) : tensor<?x?x?x?xf32>
+func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?xf32> {
+  %empty_packed = tensor.empty(%dim0, %dim1) : tensor<?x?x32x32xf32>
   %unpacked = tensor.unpack %empty_packed
     inner_dims_pos = [0, 1] inner_tiles = [32, 32]
-    into %arg0 : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
+    into %arg0 : tensor<?x?x32x32xf32> -> tensor<?x?xf32>
   return %unpacked : tensor<?x?xf32>
 }
 
 // CHECK-LABEL: func.func @unpack_empty_dynamic(
 // CHECK-SAME:   %[[T:.+]]: tensor<?x?xf32>,
 // CHECK-SAME:   %[[DIM0:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME:   %[[DIM1:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME:   %[[DIM2:[a-zA-Z0-9_]+]]: index,
-// CHECK-SAME:   %[[DIM3:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME:   %[[DIM1:[a-zA-Z0-9_]+]]: index
 // CHECK-NOT:   tensor.unpack
 // CHECK:       return %[[T]] : tensor<?x?xf32>
```

mlir/test/Dialect/Tensor/invalid.mlir

Lines changed: 38 additions & 0 deletions
```diff
@@ -755,9 +755,47 @@ func.func @pack_mismatch_inner_tile_size_and_output_shape(
 
 // -----
 
+func.func @pack_dynamic_inner_tile_size_and_static_output_shape(
+    %input : tensor<?x?xf32>, %output : tensor<?x?x8x8xf32>) -> tensor<?x?x8x8xf32> {
+  %c8 = arith.constant 8 : index
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, %c8] into %output : tensor<?x?xf32> -> tensor<?x?x8x8xf32>
+  return %0 : tensor<?x?x8x8xf32>
+}
+
+// -----
+
+func.func @pack_static_inner_tile_size_and_dynamic_output_shape(
+    %input : tensor<?x?xf32>, %output : tensor<?x?x8x?xf32>) -> tensor<?x?x8x?xf32> {
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.pack %input inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %output : tensor<?x?xf32> -> tensor<?x?x8x?xf32>
+  return %0 : tensor<?x?x8x?xf32>
+}
+
+// -----
+
 func.func @unpack_mismatch_inner_tile_size_and_output_shape(
     %input : tensor<?x?x8x8xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
   // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
   %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x8x8xf32> -> tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
+
+// -----
+
+func.func @unpack_dynamic_inner_tile_size_and_static_output_shape(
+    %input : tensor<?x?x8x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %c8 = arith.constant 8 : index
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [%c8, 4] into %output : tensor<?x?x8x4xf32> -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// -----
+
+func.func @unpack_static_inner_tile_size_and_dynamic_output_shape(
+    %input : tensor<?x?x?x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  // expected-error@+1 {{mismatch in inner tile sizes specified and shaped of tiled dimension in the packed type}}
+  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [8, 4] into %output : tensor<?x?x?x4xf32> -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
```
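For contrast with the invalid tests above, the verifier is satisfied when each dynamic (SSA-value) tile size lines up with a dynamic dimension and each static tile size with a matching static dimension. A sketch of a form that still verifies (illustrative, not part of the patch):

```mlir
func.func @unpack_dynamic_tile_size_and_dynamic_output_shape(
    %input : tensor<?x?x?x4xf32>, %output : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %c8 = arith.constant 8 : index
  // Dynamic tile size %c8 pairs with the dynamic dimension; static 4 matches 4.
  %0 = tensor.unpack %input inner_dims_pos = [0, 1] inner_tiles = [%c8, 4] into %output : tensor<?x?x?x4xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}
```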
