
Commit 370ae55

Author: Jerry Wu
Message: Finish tests
Parent: 3f79dc6

2 files changed: +139 −43 lines

mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
61 additions, 33 deletions
@@ -559,20 +559,34 @@ projectToInnerMostNonUnitDimsPos(ArrayRef<int64_t> dimsPos,
                                  ArrayRef<int64_t> baseShape) {
   SmallVector<int64_t> projectedDimsPos;
   for (auto pos : dimsPos) {
-    int64_t projectedPos = -1;
+    // In the case all dims are unit, this will return the inner-most one.
+    int64_t projectedPos = reassocIndices[pos].back();
     for (auto it = reassocIndices[pos].rbegin();
          it != reassocIndices[pos].rend(); ++it) {
-      projectedPos = *it;
-      if (baseShape[projectedPos] > 1) {
+      int64_t dim = baseShape[*it];
+      if (dim > 1 || ShapedType::isDynamic(dim)) {
+        projectedPos = *it;
         break;
       }
     }
-    assert(projectedPos != -1 && "projected dim not found");
     projectedDimsPos.push_back(projectedPos);
   }
   return projectedDimsPos;
 }
 
+static bool
+isProjectedDimsDivisibleByTileSizes(ArrayRef<int64_t> projectedDimsPos,
+                                    ArrayRef<int64_t> targetShape,
+                                    ArrayRef<int64_t> tileSizes) {
+  for (auto [projectedPos, tileSize] :
+       llvm::zip_equal(projectedDimsPos, tileSizes)) {
+    int64_t dim = targetShape[projectedPos];
+    if (ShapedType::isDynamic(dim) || (dim % tileSize) != 0)
+      return false;
+  }
+  return true;
+}
+
 static int64_t applyPermutationAndReindexReassoc(
     SmallVector<ReassociationIndices> &reassociationIndices,
     ArrayRef<int64_t> dimsPerm) {
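Editor's note: the rewritten projection prefers the inner-most source dim that is non-unit or dynamic and, when every dim in the group is unit, falls back to the inner-most one, which is why the old assert could be dropped. A minimal standalone sketch under stated assumptions (plain C++ with std::vector in place of the MLIR types; kDynamic is a stand-in for ShapedType::kDynamic), exercised on the shapes from the doc-comment example in the next hunk:

#include <cassert>
#include <cstdint>
#include <vector>

constexpr int64_t kDynamic = -1; // stand-in for ShapedType::kDynamic

// For each packed dim position, walk its reassociation group from the
// inner-most source dim outwards and pick the first non-unit or dynamic dim.
std::vector<int64_t> projectToInnerMostNonUnitDimsPos(
    const std::vector<int64_t> &dimsPos,
    const std::vector<std::vector<int64_t>> &reassocIndices,
    const std::vector<int64_t> &baseShape) {
  std::vector<int64_t> projectedDimsPos;
  for (int64_t pos : dimsPos) {
    // If every dim in the group is unit, this keeps the inner-most one.
    int64_t projectedPos = reassocIndices[pos].back();
    for (auto it = reassocIndices[pos].rbegin();
         it != reassocIndices[pos].rend(); ++it) {
      int64_t dim = baseShape[*it];
      if (dim > 1 || dim == kDynamic) {
        projectedPos = *it;
        break;
      }
    }
    projectedDimsPos.push_back(projectedPos);
  }
  return projectedDimsPos;
}

int main() {
  // tensor<?x16x4xf32> collapsed via [[0, 1], [2]] into tensor<?x4xf32>:
  // packed dim 0 projects to source dim 1 (size 16), packed dim 1 to dim 2.
  auto projected = projectToInnerMostNonUnitDimsPos(
      {0, 1}, {{0, 1}, {2}}, {kDynamic, 16, 4});
  assert(projected == (std::vector<int64_t>{1, 2}));
  return 0;
}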
@@ -589,23 +603,24 @@ static int64_t applyPermutationAndReindexReassoc(
 }
 
 /// Bubble up pack op through collapse shape op when the packed dims can be
-/// mapped to the source dims before collapsing. This is possible when the inner
-/// tile sizes can divide the mapped source dims.
+/// projected to the dims before collapsing. This is possible when the inner
+/// tile sizes can divide the projected dims.
 ///
 /// For example:
 ///
-/// %collapsed = tensor.collapse_shape %in [[0, 1], 2] : tensor<?x16x4xf32> into
-/// tensor<?x4xf32> %out = tensor.empty() : tensor<?x4x8x1xf32> %pack =
-/// tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1]
-/// inner_tiles = [8, 1] into %out : tensor<?x4xf32> -> tensor<?x4x8x1xf32>
+/// %collapsed = tensor.collapse_shape %in [[0, 1], 2]
+///     : tensor<?x16x4xf32> into tensor<?x4xf32>
+/// %pack = tensor.pack %collapsed outer_dims_perm = [0, 1]
+///     inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %empty
+///     : tensor<?x4xf32> -> tensor<?x4x8x1xf32>
 ///
 /// Can be transformed into:
 ///
-/// %out = tensor.empty() : tensor<?x2x4x8x1xf32>
-/// %pack = tensor.pack %in outer_dims_perm = [1, 2] inner_dims_pos = [1, 2]
-/// inner_tiles = [8, 1] into %out : tensor<?x16x4xf32> -> tensor<?x2x4x8x1xf32>
-/// %collapsed = tensor.collapse_shape %1 [[0, 1], 2, 3, 4] :
-/// tensor<?x2x4x8x1xf32> into tensor<?x4x8x1>
+/// %pack = tensor.pack %in outer_dims_perm = [1, 2]
+///     inner_dims_pos = [1, 2] inner_tiles = [8, 1] into %empty
+///     : tensor<?x16x4xf32> -> tensor<?x2x4x8x1xf32>
+/// %collapsed = tensor.collapse_shape %pack [[0, 1], 2, 3, 4]
+///     : tensor<?x2x4x8x1xf32> into tensor<?x4x8x1xf32>
 static LogicalResult
 bubbleUpPackOpThroughCollapseShape(tensor::CollapseShapeOp collapseOp,
                                    tensor::PackOp packOp,
@@ -620,13 +635,9 @@ bubbleUpPackOpThroughCollapseShape(tensor::CollapseShapeOp collapseOp,
   SmallVector<int64_t> projectedInnerDimsPos =
       projectToInnerMostNonUnitDimsPos(innerDimsPos, reassocIndices, srcShape);
 
-  // Check if the projected dims on the source are divisible by the inner tile
-  // sizes.
-  for (auto [projectedPos, tileSize] :
-       llvm::zip_equal(projectedInnerDimsPos, innerTileSizes)) {
-    int64_t dim = srcShape[projectedPos];
-    if (ShapedType::isDynamic(dim) || (dim % tileSize) != 0)
-      return failure();
+  if (!isProjectedDimsDivisibleByTileSizes(projectedInnerDimsPos, srcShape,
+                                           innerTileSizes)) {
+    return failure();
   }
   // Expand the outer dims permutation with the associated source dims for the
   // new permutation after bubbling. This is because moving a collapsed dim is
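Editor's note: both rewrites now share this divisibility gate. A self-contained sketch of the same logic (plain C++ again; kDynamic stands in for ShapedType::kDynamic), checked on the bubble-up example's shapes:

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int64_t kDynamic = -1; // stand-in for ShapedType::kDynamic

// A dynamic projected dim can never be proven divisible at compile time, so
// the rewrite bails out on it, exactly like on a non-divisible static dim.
bool isProjectedDimsDivisibleByTileSizes(
    const std::vector<int64_t> &projectedDimsPos,
    const std::vector<int64_t> &targetShape,
    const std::vector<int64_t> &tileSizes) {
  for (std::size_t i = 0; i < projectedDimsPos.size(); ++i) {
    int64_t dim = targetShape[projectedDimsPos[i]];
    if (dim == kDynamic || dim % tileSizes[i] != 0)
      return false;
  }
  return true;
}

int main() {
  // srcShape ?x16x4, projected inner dims [1, 2], inner tiles [8, 1]:
  // 16 % 8 == 0 and 4 % 1 == 0, so the pack can bubble up.
  return isProjectedDimsDivisibleByTileSizes({1, 2}, {kDynamic, 16, 4}, {8, 1})
             ? 0
             : 1;
}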
@@ -646,7 +657,9 @@ bubbleUpPackOpThroughCollapseShape(tensor::CollapseShapeOp collapseOp,
       packOp.getMixedTiles(), packOp.getPaddingValue(), newOuterDimsPerm);
 
   SmallVector<ReassociationIndices> newReassocIndices = reassocIndices;
-  // First build reassociations on the outer dims after the permutation.
+  // First apply the permutation on the reassociations of the outer dims.
+  // For example, given the permutation [1, 0], the reassociations [[0, 1], [2]]
+  // become [[0], [1, 2]].
   int64_t lastPos =
       applyPermutationAndReindexReassoc(newReassocIndices, outerDimsPerm);
   // Then add direct mapping for the inner tile dims.
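Editor's note: the comment's example in executable form. This is a hypothetical standalone version of applyPermutationAndReindexReassoc (the real helper works on SmallVector<ReassociationIndices>); it permutes the groups and then renumbers the flattened indices consecutively:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Permute the reassociation groups, then renumber the flattened indices so
// they are consecutive again; returns the next free index, which the caller
// can use to append direct mappings for the inner tile dims.
int64_t applyPermutationAndReindexReassoc(
    std::vector<std::vector<int64_t>> &reassociationIndices,
    const std::vector<int64_t> &dimsPerm) {
  std::vector<std::vector<int64_t>> permuted;
  for (int64_t dim : dimsPerm)
    permuted.push_back(reassociationIndices[dim]);
  int64_t nextPos = 0;
  for (auto &indices : permuted)
    for (auto &index : indices)
      index = nextPos++;
  reassociationIndices = std::move(permuted);
  return nextPos;
}

int main() {
  // Permutation [1, 0] turns [[0, 1], [2]] into [[0], [1, 2]].
  std::vector<std::vector<int64_t>> reassoc = {{0, 1}, {2}};
  applyPermutationAndReindexReassoc(reassoc, {1, 0});
  assert(reassoc == (std::vector<std::vector<int64_t>>{{0}, {1, 2}}));
  return 0;
}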
@@ -698,6 +711,25 @@ class BubbleUpPackOpThroughReshapeOp final
   ControlPropagationFn controlFn;
 };
 
+/// Push down unpack op through expand shape op when the packed dims can be
+/// projected to the dims after expanding. This is possible when the inner tile
+/// sizes can divide the projected dims.
+///
+/// For example:
+///
+/// %unpack = tensor.unpack %in outer_dims_perm = [0, 1]
+///     inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %empty
+///     : tensor<?x32x8x8xf32> -> tensor<?x256xf32>
+/// %expanded = tensor.expand_shape %unpack [[0, 1], [2]]
+///     : tensor<?x256xf32> into tensor<?x256x256xf32>
+///
+/// Can be transformed into:
+///
+/// %expanded = tensor.expand_shape %in [[0, 1], [2], [3], [4]]
+///     : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
+/// %unpack = tensor.unpack %expanded outer_dims_perm = [0, 1, 2]
+///     inner_dims_pos = [1, 2] inner_tiles = [8, 8] into %empty
+///     : tensor<?x32x32x8x8xf32> -> tensor<?x256x256xf32>
 static LogicalResult
 pushDownUnPackOpThroughExpandShape(tensor::UnPackOp unPackOp,
                                    tensor::ExpandShapeOp expandOp,
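Editor's note: a worked check of the divisibility condition on this doc-comment example (self-contained C++ sketch; kDynamic stands in for ShapedType::kDynamic):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int64_t kDynamic = -1; // stand-in for ShapedType::kDynamic

int main() {
  // Expanded dest tensor<?x256x256xf32> with reassociations [[0, 1], [2]]:
  // inner_dims_pos = [0, 1] on tensor<?x256xf32> project to the inner-most
  // non-unit expanded dims, i.e. 0 -> 1 and 1 -> 2.
  std::vector<int64_t> dstShape = {kDynamic, 256, 256};
  std::vector<int64_t> projectedInnerDimsPos = {1, 2};
  std::vector<int64_t> innerTileSizes = {8, 8};
  for (std::size_t i = 0; i < projectedInnerDimsPos.size(); ++i) {
    int64_t dim = dstShape[projectedInnerDimsPos[i]];
    // Both projected dims are 256 and 256 % 8 == 0, so the unpack can be
    // pushed below the expand with inner_dims_pos = [1, 2].
    assert(dim != kDynamic && dim % innerTileSizes[i] == 0);
  }
  return 0;
}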
@@ -712,15 +744,9 @@ pushDownUnPackOpThroughExpandShape(tensor::UnPackOp unPackOp,
   SmallVector<int64_t> projectedInnerDimsPos =
       projectToInnerMostNonUnitDimsPos(innerDimsPos, reassocIndices, dstShape);
 
-  // Check if the projected dims on the dest are divisible by the inner tile
-  // sizes.
-  for (auto [projectedPos, tileSize] :
-       llvm::zip_equal(projectedInnerDimsPos, innerTileSizes)) {
-    int64_t dim = dstShape[projectedPos];
-    if (ShapedType::isDynamic(dim) ||
-        (dstShape[projectedPos] % tileSize) != 0) {
-      return failure();
-    }
+  if (!isProjectedDimsDivisibleByTileSizes(projectedInnerDimsPos, dstShape,
+                                           innerTileSizes)) {
+    return failure();
   }
   // Expand the outer dims permutation with the associated expanded dims for the
   // new permutation after pushing. This is because moving a source dim is
@@ -733,7 +759,9 @@ pushDownUnPackOpThroughExpandShape(tensor::UnPackOp unPackOp,
   }
 
   SmallVector<ReassociationIndices> newReassocIndices = reassocIndices;
-  // First build reassociations on the outer dims after the permutation.
+  // First apply the permutation on the reassociations of the outer dims.
+  // For example, given the permutation [1, 0], the reassociations [[0, 1], [2]]
+  // become [[0], [1, 2]].
   int64_t lastPos =
       applyPermutationAndReindexReassoc(newReassocIndices, outerDimsPerm);
   // Then add direct mapping for the inner tile dims.

mlir/test/Dialect/Linalg/data-layout-propagation.mlir
78 additions, 10 deletions
@@ -906,58 +906,126 @@ func.func @unpack_different_destination_shape(%arg0: tensor<1x1x1080x1920x16xi32
 // CHECK-SAME: into %[[UNPACK_NEW_DEST]]
 // CHECK: return %[[UNPACK]] : tensor<16x540x960xi32>
 
-func.func @bubble_up_pack_through_collapse(%1: tensor<192x16x64x4xf32>) -> tensor<384x256x8x1xf32> {
-  %collapsed = tensor.collapse_shape %1 [[0, 1], [2, 3]] : tensor<192x16x64x4xf32> into tensor<3072x256xf32>
-  %2 = tensor.empty() : tensor<384x256x8x1xf32>
-  %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %2 : tensor<3072x256xf32> -> tensor<384x256x8x1xf32>
-  func.return %pack : tensor<384x256x8x1xf32>
+// -----
+
+func.func @bubble_up_pack_through_collapse(%1: tensor<?x16x4xf32>, %dim : index) -> tensor<?x4x8x1xf32> {
+  %collapsed = tensor.collapse_shape %1 [[0, 1], [2]] : tensor<?x16x4xf32> into tensor<?x4xf32>
+  %2 = tensor.empty(%dim) : tensor<?x4x8x1xf32>
+  %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %2 : tensor<?x4xf32> -> tensor<?x4x8x1xf32>
+  func.return %pack : tensor<?x4x8x1xf32>
 }
+// CHECK-LABEL: func.func @bubble_up_pack_through_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x16x4xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x2x4x8x1xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [8, 1] into %[[EMPTY]] : tensor<?x16x4xf32> -> tensor<?x2x4x8x1xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[PACK]] {{\[}}[0, 1], [2], [3], [4]] : tensor<?x2x4x8x1xf32> into tensor<?x4x8x1xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<?x4x8x1xf32>
+
+// -----
 
 func.func @bubble_up_permuted_pack_through_collapse(%1: tensor<4x192x16x256xf32>) -> tensor<4x32x3072x8x1xf32> {
   %collapsed = tensor.collapse_shape %1 [[0], [1, 2], [3]] : tensor<4x192x16x256xf32> into tensor<4x3072x256xf32>
   %2 = tensor.empty() : tensor<4x32x3072x8x1xf32>
   %pack = tensor.pack %collapsed outer_dims_perm = [0, 2, 1] inner_dims_pos = [2, 1] inner_tiles = [8, 1] into %2 : tensor<4x3072x256xf32> -> tensor<4x32x3072x8x1xf32>
   func.return %pack : tensor<4x32x3072x8x1xf32>
 }
+// CHECK-LABEL: func.func @bubble_up_permuted_pack_through_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<4x32x192x16x8x1xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 3, 1, 2] inner_dims_pos = [3, 2] inner_tiles = [8, 1] into %[[EMPTY]] : tensor<4x192x16x256xf32> -> tensor<4x32x192x16x8x1xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[PACK]] {{\[}}[0], [1], [2, 3], [4], [5]] : tensor<4x32x192x16x8x1xf32> into tensor<4x32x3072x8x1xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<4x32x3072x8x1xf32>
+
+// -----
 
 func.func @bubble_up_pack_through_unit_collapse(%1: tensor<1x64x1x4xf32>) -> tensor<8x4x8x1xf32> {
   %collapsed = tensor.collapse_shape %1 [[0, 1, 2], [3]] : tensor<1x64x1x4xf32> into tensor<64x4xf32>
   %2 = tensor.empty() : tensor<8x4x8x1xf32>
   %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %2 : tensor<64x4xf32> -> tensor<8x4x8x1xf32>
   func.return %pack : tensor<8x4x8x1xf32>
 }
+// CHECK-LABEL: func.func @bubble_up_pack_through_unit_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<1x8x1x4x8x1xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 1, 2, 3] inner_dims_pos = [1, 3] inner_tiles = [8, 1] into %[[EMPTY]] : tensor<1x64x1x4xf32> -> tensor<1x8x1x4x8x1xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[PACK]] {{\[}}[0, 1, 2], [3], [4], [5]] : tensor<1x8x1x4x8x1xf32> into tensor<8x4x8x1xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<8x4x8x1xf32>
+
+// -----
 
 func.func @no_bubble_up_pack_through_non_divisible_collapse(%1: tensor<3072x64x4xf32>) -> tensor<384x32x8x8xf32> {
   %collapsed = tensor.collapse_shape %1 [[0], [1, 2]] : tensor<3072x64x4xf32> into tensor<3072x256xf32>
   %2 = tensor.empty() : tensor<384x32x8x8xf32>
   %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %2 : tensor<3072x256xf32> -> tensor<384x32x8x8xf32>
   func.return %pack : tensor<384x32x8x8xf32>
 }
+// CHECK-LABEL: func.func @no_bubble_up_pack_through_non_divisible_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0], [1, 2]] : tensor<3072x64x4xf32> into tensor<3072x256xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[COLLAPSED]]
+// CHECK: return %[[PACK]] : tensor<384x32x8x8xf32>
 
-func.func @push_down_unpack_through_expand(%5: tensor<384x32x8x8xf32>) -> tensor<12x256x256xf32> {
-  %6 = tensor.empty() : tensor<3072x256xf32>
-  %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<384x32x8x8xf32> -> tensor<3072x256xf32>
-  %expanded = tensor.expand_shape %unpack [[0, 1], [2]] : tensor<3072x256xf32> into tensor<12x256x256xf32>
-  func.return %expanded : tensor<12x256x256xf32>
+// -----
+
+func.func @push_down_unpack_through_expand(%5: tensor<?x32x8x8xf32>, %dim: index) -> tensor<?x256x256xf32> {
+  %6 = tensor.empty(%dim) : tensor<?x256xf32>
+  %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<?x32x8x8xf32> -> tensor<?x256xf32>
+  %expanded = tensor.expand_shape %unpack [[0, 1], [2]] : tensor<?x256xf32> into tensor<?x256x256xf32>
+  func.return %expanded : tensor<?x256x256xf32>
 }
+// CHECK-LABEL: func.func @push_down_unpack_through_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
+// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<?x32x32x8x8xf32> -> tensor<?x256x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<?x256x256xf32>
+
+// -----
 
 func.func @push_down_permuted_unpack_through_expand(%5: tensor<4x32x384x8x8xf32>) -> tensor<4x12x256x256xf32> {
   %6 = tensor.empty() : tensor<4x3072x256xf32>
   %unpack = tensor.unpack %5 outer_dims_perm = [0, 2, 1] inner_dims_pos = [2, 1] inner_tiles = [8, 8] into %6 : tensor<4x32x384x8x8xf32> -> tensor<4x3072x256xf32>
   %expanded = tensor.expand_shape %unpack [[0], [1, 2], [3]] : tensor<4x3072x256xf32> into tensor<4x12x256x256xf32>
   func.return %expanded : tensor<4x12x256x256xf32>
 }
+// CHECK-LABEL: @push_down_permuted_unpack_through_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0], [1], [2, 3], [4], [5]] : tensor<4x32x384x8x8xf32> into tensor<4x32x12x32x8x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<4x12x256x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED]] outer_dims_perm = [0, 3, 1, 2] inner_dims_pos = [3, 2] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<4x32x12x32x8x8xf32> -> tensor<4x12x256x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<4x12x256x256xf32>
+
+// -----
 
 func.func @push_down_unpack_through_unit_expand(%5: tensor<6x32x8x8xf32>) -> tensor<3x16x1x256xf32> {
   %6 = tensor.empty() : tensor<48x256xf32>
   %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<6x32x8x8xf32> -> tensor<48x256xf32>
   %expanded = tensor.expand_shape %unpack [[0, 1, 2], [3]] : tensor<48x256xf32> into tensor<3x16x1x256xf32>
   func.return %expanded : tensor<3x16x1x256xf32>
 }
+// CHECK-LABEL: func.func @push_down_unpack_through_unit_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1, 2], [3], [4], [5]] : tensor<6x32x8x8xf32> into tensor<3x2x1x32x8x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<3x16x1x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED]] outer_dims_perm = [0, 1, 2, 3] inner_dims_pos = [1, 3] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<3x2x1x32x8x8xf32> -> tensor<3x16x1x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<3x16x1x256xf32>
+
+// -----
 
 func.func @no_push_down_unpack_through_non_divisible_expand(%5: tensor<384x32x8x8xf32>) -> tensor<256x12x256xf32> {
   %6 = tensor.empty() : tensor<3072x256xf32>
   %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<384x32x8x8xf32> -> tensor<3072x256xf32>
   %expanded = tensor.expand_shape %unpack [[0, 1], [2]] : tensor<3072x256xf32> into tensor<256x12x256xf32>
   func.return %expanded : tensor<256x12x256xf32>
 }
+// CHECK-LABEL: func.func @no_push_down_unpack_through_non_divisible_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[UNPACK]] {{\[}}[0, 1], [2]] : tensor<3072x256xf32> into tensor<256x12x256xf32>
+// CHECK: return %[[EXPANDED]] : tensor<256x12x256xf32>
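Editor's note on the test layout: each added "// -----" separator makes a case its own split-input module, so the new dynamic-shape cases run and fail independently. This assumes the file keeps its existing RUN line, which is expected to pipe the file through mlir-opt's data-layout propagation test pass with -split-input-file and verify the output with FileCheck.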
