Commit 9fdb00c

[fixup] Do minimal collapsing of the memref
1 parent: 25f7bdd

2 files changed: +68 -74 lines


mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp

Lines changed: 10 additions & 19 deletions
@@ -581,17 +581,6 @@ static SmallVector<Value> getCollapsedIndices(RewriterBase &rewriter,
 }
 
 namespace {
-
-/// Helper function to return the index of the last dynamic dimension
-/// in `shape` or -1 if there are no dynamic dimensions.
-int64_t lastDynIndex(ArrayRef<int64_t> shape) {
-  return static_cast<int64_t>(
-      std::distance(
-          std::find(shape.rbegin(), shape.rend(), ShapedType::kDynamic),
-          shape.rend()) -
-      1);
-}
-
 /// Rewrites contiguous row-major vector.transfer_read ops by inserting
 /// memref.collapse_shape on the source so that the resulting
 /// vector.transfer_read has a 1D source. Requires the source shape to be
@@ -640,10 +629,11 @@ class FlattenContiguousRowMajorTransferReadPattern
     if (transferReadOp.getMask())
       return failure();
 
-    // Determinine the first memref dimension to collapse
-    int64_t firstDimToCollapse = std::max(
-        lastDynIndex(sourceType.getShape()),
-        sourceType.getRank() - sourceType.getNumContiguousTrailingDims());
+    // Determine the first memref dimension to collapse - just enough so we can
+    // read a flattened vector.
+    int64_t firstDimToCollapse =
+        sourceType.getRank() -
+        vectorType.getShape().drop_while([](auto v) { return v == 1; }).size();
 
     // 1. Collapse the source memref
     Value collapsedSource =
@@ -735,10 +725,11 @@ class FlattenContiguousRowMajorTransferWritePattern
     if (transferWriteOp.getMask())
      return failure();
 
-    // Determinine the first memref dimension to collapse
-    int64_t firstDimToCollapse = std::max(
-        lastDynIndex(sourceType.getShape()),
-        sourceType.getRank() - sourceType.getNumContiguousTrailingDims());
+    // Determine the first memref dimension to collapse - just enough so we can
+    // write a flattened vector.
+    int64_t firstDimToCollapse =
+        sourceType.getRank() -
+        vectorType.getShape().drop_while([](auto v) { return v == 1; }).size();
 
     // 1. Collapse the source memref
     Value collapsedSource =
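
For reference, a minimal standalone sketch of the new index computation (not part of the patch; the function name and std::vector-based signature are illustrative only). It mirrors the drop_while logic above in plain C++ so the arithmetic is easy to check: leading unit dims of the vector are ignored, and the memref is collapsed only as far as the flattened transfer actually requires.

#include <cstdint>
#include <vector>

// Collapse only as many trailing memref dims as the vector needs,
// skipping leading unit dims of the vector shape (drop_while(v == 1)).
int64_t firstDimToCollapse(int64_t memrefRank,
                           const std::vector<int64_t> &vectorShape) {
  size_t lead = 0;
  while (lead < vectorShape.size() && vectorShape[lead] == 1)
    ++lead;
  return memrefRank - static_cast<int64_t>(vectorShape.size() - lead);
}

// E.g. for a rank-4 memref (shapes taken from the tests below):
//   vector<2x3x2>   -> 4 - 3 = 1, reassociation [[0], [1, 2, 3]]
//   vector<1x1x2x2> -> 4 - 2 = 2, reassociation [[0], [1], [2, 3]]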

mlir/test/Dialect/Vector/vector-transfer-flatten.mlir

Lines changed: 58 additions & 55 deletions
@@ -88,8 +88,10 @@ func.func @transfer_read_dims_mismatch_contiguous_unit_dims(
 // CHECK-SAME: %[[MEM:.*]]: memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>) -> vector<1x1x2x2xi8> {
 // CHECK: %[[VAL_1:.*]] = arith.constant 0 : i8
 // CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0, 1, 2, 3]] : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<120xi8, strided<[1], offset: ?>>
-// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_3]]{{\[}}%[[VAL_2]]], %[[VAL_1]] {in_bounds = [true]} : memref<120xi8, strided<[1], offset: ?>>, vector<4xi8>
+// CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_3]][%[[VAL_2]], %[[VAL_2]], %[[VAL_2]]], %[[VAL_1]] {in_bounds = [true]} : memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>, vector<4xi8>
 // CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<4xi8> to vector<1x1x2x2xi8>
 // CHECK: return %[[VAL_5]] : vector<1x1x2x2xi8>
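
Note: the leading unit dims of vector<1x1x2x2xi8> are stripped by drop_while, so only the last two memref dims are grouped (3 x 2 = 6 elements), giving memref<5x4x6xi8> and a three-index 1-D read instead of flattening the whole source to memref<120xi8>.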

@@ -116,10 +118,10 @@ func.func @transfer_read_dims_mismatch_contiguous_non_unit_dims(
 // CHECK: %[[C0_I8:.+]] = arith.constant 0 : i8
 // CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
-// CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<120xi8, {{.+}}>
-// CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED_MEM]][%[[C0]]], %[[C0_I8]] {in_bounds = [true]}
-// CHECK-SAME: : memref<120xi8, strided<[1], offset: ?>>, vector<12xi8>
+// CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
+// CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<5x24xi8, {{.+}}>
+// CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED_MEM]][%[[C0]], %[[C0]]], %[[C0_I8]] {in_bounds = [true]}
+// CHECK-SAME: : memref<5x24xi8, strided<[24, 1], offset: ?>>, vector<12xi8>
 // CHECK: %[[VEC:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi8> to vector<2x3x2xi8>
 // CHECK: return %[[VEC]] : vector<2x3x2xi8>

@@ -141,17 +143,18 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
   return %res : vector<1x2x6xi32>
 }
 
-// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0] -> (s0 * 6)>
 
 // CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_zero_indices(
 // CHECK-SAME: %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index,
 // CHECK-SAME: %[[MEM:.+]]: memref<1x43x4x6xi32>
-// CHECK: %[[C_0:.+]] = arith.constant 0 : i32
+// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
+// CHECK: %[[C_0:.+]] = arith.constant 0 : index
 // CHECK: %[[COLLAPSED_IN:.+]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
-// CHECK-SAME: : memref<1x43x4x6xi32> into memref<1032xi32>
-// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
-// CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<1x43x4x6xi32> into memref<1x43x24xi32>
+// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_2]]]
+// CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[C_0]], %[[IDX_1]], %[[COLLAPSED_IDX]]], %[[C0_I32]] {in_bounds = [true]} : memref<1x43x24xi32>, vector<12xi32>
 
 // CHECK-128B-LABEL: func @transfer_read_dims_mismatch_non_zero_indices(
 // CHECK-128B-NOT: memref.collapse_shape
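
Note: only the trailing 4x6 block (24 elements) is linearized now, so %IDX_1 indexes the untouched 43-dim directly and only %IDX_2 needs scaling by 6; that is why the affine map shrinks from (s0 * 24 + s1 * 6) over the fully flattened memref<1032xi32> to (s0 * 6) over memref<1x43x24xi32>.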
@@ -202,18 +205,16 @@ func.func @transfer_read_leading_dynamic_dims(
   return %res : vector<8x4xi8>
 }
 
-// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 32)>
-
 // CHECK-LABEL: func @transfer_read_leading_dynamic_dims
 // CHECK-SAME: %[[MEM:.+]]: memref<?x?x8x4xi8, {{.+}}>, %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index
 // CHECK: %[[C0_I8:.+]] = arith.constant 0 : i8
-// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]] {{\[}}[0], [1, 2, 3]{{\]}}
-// CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?xi8, {{.+}}>
-// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_2]]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?x32xi8, {{.+}}>
 // CHECK: %[[VEC1D:.+]] = vector.transfer_read %[[COLLAPSED]]
-// CHECK-SAME: [%[[IDX_1]], %[[COLLAPSED_IDX]]], %[[C0_I8]]
-// CHECK-SAME: {in_bounds = [true]}
-// CHECK-SAME: : memref<?x?xi8, {{.+}}>, vector<32xi8>
+// CHECK-SAME: [%[[IDX_1]], %[[IDX_2]], %[[C0]]], %[[C0_I8]]
+// CHECK-SAME: {in_bounds = [true]} : memref<?x?x32xi8, {{.+}}>, vector<32xi8>
 // CHECK: %[[RES:.+]] = vector.shape_cast %[[VEC1D]] : vector<32xi8> to vector<8x4xi8>
 // CHECK: return %[[RES]] : vector<8x4xi8>
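
Note: with both leading dynamic dims kept uncollapsed, no affine.apply is needed at all; %IDX_1 and %IDX_2 pass through unchanged and a constant zero indexes the collapsed trailing 8x4 = 32 group, instead of linearizing %IDX_2 * 32 into a flattened memref<?x?xi8>.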

@@ -263,7 +264,7 @@ func.func @transfer_read_dynamic_dim_to_flatten(
   return %res : vector<1x2x6xi32>
 }
 
-// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 6)>
 
 // CHECK-LABEL: func.func @transfer_read_dynamic_dim_to_flatten
 // CHECK-SAME: %[[IDX_1:arg0]]
@@ -272,11 +273,11 @@ func.func @transfer_read_dynamic_dim_to_flatten(
 // CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
 // CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
-// CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?xi32>
-// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
-// CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[COLLAPSED_IDX]]],
-// CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?xi32>, vector<12xi32>
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?x24xi32>
+// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_2]]]
+// CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[IDX_1]], %[[COLLAPSED_IDX]]],
+// CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?x24xi32>, vector<12xi32>
 // CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
 // CHECK: return %[[RESULT]] : vector<1x2x6xi32>

@@ -428,10 +429,12 @@ func.func @transfer_write_dims_mismatch_contiguous_unit_dims(
 // CHECK-LABEL: func.func @transfer_write_dims_mismatch_contiguous_unit_dims
 // CHECK-SAME: %[[MEM:.*]]: memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>,
 // CHECK-SAME: %[[VEC:.*]]: vector<1x1x2x2xi8>) {
-// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0, 1, 2, 3]] : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<120xi8, strided<[1], offset: ?>>
-// CHECK: %[[VAL_4:.*]] = vector.shape_cast %[[VEC]] : vector<1x1x2x2xi8> to vector<4xi8>
-// CHECK: vector.transfer_write %[[VAL_4]], %[[VAL_3]]{{\[}}%[[VAL_2]]] {in_bounds = [true]} : vector<4xi8>, memref<120xi8, strided<[1], offset: ?>>
+// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[COLLAPSED:.*]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>
+// CHECK: %[[VEC_1D:.*]] = vector.shape_cast %[[VEC]] : vector<1x1x2x2xi8> to vector<4xi8>
+// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true]} : vector<4xi8>, memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>
 
 // CHECK-128B-LABEL: func @transfer_write_dims_mismatch_contiguous_unit_dims(
 // CHECK-128B: memref.collapse_shape
@@ -451,13 +454,13 @@ func.func @transfer_write_dims_mismatch_contiguous_non_unit_dims(
 // CHECK-LABEL: func.func @transfer_write_dims_mismatch_contiguous_non_unit_dims
 // CHECK-SAME: %[[MEM:.+]]: memref<5x4x3x2xi8, {{.+}}>,
 // CHECK-SAME: %[[VEC:.+]]: vector<2x2xi8>
-// CHECK: %[[C0:.+]] = arith.constant 0 : index
-// CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
-// CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<120xi8, {{.+}}>
-// CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<2x2xi8> to vector<4xi8>
-// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]]] {in_bounds = [true]}
-// CHECK-SAME: : vector<4xi8>, memref<120xi8, {{.+}}>
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<5x4x6xi8, {{.+}}>
+// CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<2x2xi8> to vector<4xi8>
+// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true]}
+// CHECK-SAME: : vector<4xi8>, memref<5x4x6xi8, {{.+}}>
 
 // CHECK-128B-LABEL: func @transfer_write_dims_mismatch_contiguous_non_unit_dims(
 // CHECK-128B: memref.collapse_shape
@@ -477,16 +480,18 @@ func.func @transfer_write_dims_mismatch_non_zero_indices(
   return
 }
 
-// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0] -> (s0 * 6)>
 
 // CHECK-LABEL: func.func @transfer_write_dims_mismatch_non_zero_indices(
 // CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
 // CHECK-SAME: %[[MEM:.*]]: memref<1x43x4x6xi32>,
 // CHECK-SAME: %[[VEC:.*]]: vector<1x2x6xi32>) {
-// CHECK-DAG: %[[IDX:.*]] = affine.apply #[[$ATTR_0]](){{\[}}%[[IDX_1]], %[[IDX_2]]]
-// CHECK-DAG: %[[CS:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0, 1, 2, 3]] : memref<1x43x4x6xi32> into memref<1032xi32>
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_2]]]
+// CHECK-DAG: %[[CS:.*]] = memref.collapse_shape %[[MEM]]
+// CHECK-DAG-SAME{LITERAL}: [[0], [1], [2, 3]] : memref<1x43x4x6xi32> into memref<1x43x24xi32>
 // CHECK: %[[SC:.*]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
-// CHECK: vector.transfer_write %[[SC]], %[[CS]][%[[IDX]]] {in_bounds = [true]} : vector<12xi32>, memref<1032xi32>
+// CHECK: vector.transfer_write %[[SC]], %[[CS]][%[[C0]], %[[IDX_1]], %[[IDX]]] {in_bounds = [true]} : vector<12xi32>, memref<1x43x24xi32>
 
 // CHECK-128B-LABEL: func @transfer_write_dims_mismatch_non_zero_indices(
 // CHECK-128B-NOT: memref.collapse_shape
@@ -534,18 +539,16 @@ func.func @transfer_write_leading_dynamic_dims(
   return
 }
 
-// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 32)>
-
 // CHECK-LABEL: func @transfer_write_leading_dynamic_dims
 // CHECK-SAME: %[[VEC:.+]]: vector<8x4xi8>, %[[MEM:.+]]: memref<?x?x8x4xi8, {{.+}}>, %[[ARG2:.+]]: index, %[[ARG3:.+]]: index
-// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]] {{\[}}[0], [1, 2, 3]{{\]}}
-// CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?xi8, {{.+}}>
-// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[ARG3]]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?x32xi8, {{.+}}>
 // CHECK: %[[VEC1D:.+]] = vector.shape_cast %[[VEC]] : vector<8x4xi8> to vector<32xi8>
 // CHECK: vector.transfer_write %[[VEC1D]], %[[COLLAPSED]]
-// CHECK-SAME: [%[[ARG2]], %[[COLLAPSED_IDX]]]
-// CHECK-SAME: {in_bounds = [true]}
-// CHECK-SAME: : vector<32xi8>, memref<?x?xi8, {{.+}}>
+// CHECK-SAME: [%[[ARG2]], %[[ARG3]], %[[C0]]] {in_bounds = [true]}
+// CHECK-SAME: : vector<32xi8>, memref<?x?x32xi8, {{.+}}>
 
 // CHECK-128B-LABEL: func @transfer_write_leading_dynamic_dims
 // CHECK-128B: memref.collapse_shape
@@ -589,7 +592,7 @@ func.func @transfer_write_dynamic_dim_to_flatten(
   return
 }
 
-// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 6)>
 
 // CHECK-LABEL: func.func @transfer_write_dynamic_dim_to_flatten
 // CHECK-SAME: %[[IDX_1:arg0]]: index
@@ -598,12 +601,12 @@ func.func @transfer_write_dynamic_dim_to_flatten(
 // CHECK-SAME: %[[MEM:arg3]]: memref<1x?x4x6xi32>
 // CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
-// CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?xi32>
-// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
+// CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+// CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?x24xi32>
+// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_2]]]
 // CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
-// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[COLLAPSED_IDX]]]
-// CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?xi32>
+// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[IDX_1]], %[[COLLAPSED_IDX]]]
+// CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?x24xi32>
 
 // CHECK-128B-LABEL: func @transfer_write_dynamic_dim_to_flatten
 // CHECK-128B-NOT: memref.collapse_shape
