@@ -88,8 +88,10 @@ func.func @transfer_read_dims_mismatch_contiguous_unit_dims(
// CHECK-SAME: %[[MEM:.*]]: memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>) -> vector<1x1x2x2xi8> {
// CHECK: %[[VAL_1:.*]] = arith.constant 0 : i8
// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
- // CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0, 1, 2, 3]] : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<120xi8, strided<[1], offset: ?>>
- // CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_3]]{{\[}}%[[VAL_2]]], %[[VAL_1]] {in_bounds = [true]} : memref<120xi8, strided<[1], offset: ?>>, vector<4xi8>
+ // CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[MEM]]
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>
+ // CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_3]][%[[VAL_2]], %[[VAL_2]], %[[VAL_2]]], %[[VAL_1]] {in_bounds = [true]} : memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>, vector<4xi8>
// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<4xi8> to vector<1x1x2x2xi8>
// CHECK: return %[[VAL_5]] : vector<1x1x2x2xi8>

@@ -116,10 +118,10 @@ func.func @transfer_read_dims_mismatch_contiguous_non_unit_dims(
// CHECK: %[[C0_I8:.+]] = arith.constant 0 : i8
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
- // CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<120xi8, {{.+}}>
- // CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED_MEM]][%[[C0]]], %[[C0_I8]] {in_bounds = [true]}
- // CHECK-SAME: : memref<120xi8, strided<[1], offset: ?>>, vector<12xi8>
+ // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
+ // CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<5x24xi8, {{.+}}>
+ // CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED_MEM]][%[[C0]], %[[C0]]], %[[C0_I8]] {in_bounds = [true]}
+ // CHECK-SAME: : memref<5x24xi8, strided<[24, 1], offset: ?>>, vector<12xi8>
// CHECK: %[[VEC:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi8> to vector<2x3x2xi8>
// CHECK: return %[[VEC]] : vector<2x3x2xi8>

@@ -141,17 +143,18 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
return %res : vector<1x2x6xi32>
}

- // CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+ // CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0] -> (s0 * 6)>

// CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_zero_indices(
// CHECK-SAME: %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index,
// CHECK-SAME: %[[MEM:.+]]: memref<1x43x4x6xi32>
- // CHECK: %[[C_0:.+]] = arith.constant 0 : i32
+ // CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
+ // CHECK: %[[C_0:.+]] = arith.constant 0 : index
// CHECK: %[[COLLAPSED_IN:.+]] = memref.collapse_shape %[[MEM]]
- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
- // CHECK-SAME: : memref<1x43x4x6xi32> into memref<1032xi32>
- // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
- // CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<1x43x4x6xi32> into memref<1x43x24xi32>
+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_2]]]
+ // CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[C_0]], %[[IDX_1]], %[[COLLAPSED_IDX]]], %[[C0_I32]] {in_bounds = [true]} : memref<1x43x24xi32>, vector<12xi32>

// CHECK-128B-LABEL: func @transfer_read_dims_mismatch_non_zero_indices(
// CHECK-128B-NOT: memref.collapse_shape
@@ -202,18 +205,16 @@ func.func @transfer_read_leading_dynamic_dims(
return %res : vector<8x4xi8>
}

- // CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 32)>
-
// CHECK-LABEL: func @transfer_read_leading_dynamic_dims
// CHECK-SAME: %[[MEM:.+]]: memref<?x?x8x4xi8, {{.+}}>, %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index
// CHECK: %[[C0_I8:.+]] = arith.constant 0 : i8
- // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]] {{\[}}[0], [1, 2, 3]{{\]}}
- // CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?xi8, {{.+}}>
- // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_2]]]
+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
+ // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?x32xi8, {{.+}}>
// CHECK: %[[VEC1D:.+]] = vector.transfer_read %[[COLLAPSED]]
- // CHECK-SAME: [%[[IDX_1]], %[[COLLAPSED_IDX]]], %[[C0_I8]]
- // CHECK-SAME: {in_bounds = [true]}
- // CHECK-SAME: : memref<?x?xi8, {{.+}}>, vector<32xi8>
+ // CHECK-SAME: [%[[IDX_1]], %[[IDX_2]], %[[C0]]], %[[C0_I8]]
+ // CHECK-SAME: {in_bounds = [true]} : memref<?x?x32xi8, {{.+}}>, vector<32xi8>
// CHECK: %[[RES:.+]] = vector.shape_cast %[[VEC1D]] : vector<32xi8> to vector<8x4xi8>
// CHECK: return %[[RES]] : vector<8x4xi8>

@@ -263,7 +264,7 @@ func.func @transfer_read_dynamic_dim_to_flatten(
return %res : vector<1x2x6xi32>
}

- // CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+ // CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 6)>

// CHECK-LABEL: func.func @transfer_read_dynamic_dim_to_flatten
// CHECK-SAME: %[[IDX_1:arg0]]
@@ -272,11 +273,11 @@ func.func @transfer_read_dynamic_dim_to_flatten(
// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
- // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
- // CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?xi32>
- // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
- // CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[COLLAPSED_IDX]]],
- // CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?xi32>, vector<12xi32>
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?x24xi32>
+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_2]]]
+ // CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[IDX_1]], %[[COLLAPSED_IDX]]],
+ // CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?x24xi32>, vector<12xi32>
// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
// CHECK: return %[[RESULT]] : vector<1x2x6xi32>

@@ -428,10 +429,12 @@ func.func @transfer_write_dims_mismatch_contiguous_unit_dims(
// CHECK-LABEL: func.func @transfer_write_dims_mismatch_contiguous_unit_dims
// CHECK-SAME: %[[MEM:.*]]: memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>,
// CHECK-SAME: %[[VEC:.*]]: vector<1x1x2x2xi8>) {
- // CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
- // CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0, 1, 2, 3]] : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<120xi8, strided<[1], offset: ?>>
- // CHECK: %[[VAL_4:.*]] = vector.shape_cast %[[VEC]] : vector<1x1x2x2xi8> to vector<4xi8>
- // CHECK: vector.transfer_write %[[VAL_4]], %[[VAL_3]]{{\[}}%[[VAL_2]]] {in_bounds = [true]} : vector<4xi8>, memref<120xi8, strided<[1], offset: ?>>
+ // CHECK: %[[C0:.*]] = arith.constant 0 : index
+ // CHECK: %[[COLLAPSED:.*]] = memref.collapse_shape %[[MEM]]
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<5x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>> into memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>
+ // CHECK: %[[VEC_1D:.*]] = vector.shape_cast %[[VEC]] : vector<1x1x2x2xi8> to vector<4xi8>
+ // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true]} : vector<4xi8>, memref<5x4x6xi8, strided<[24, 6, 1], offset: ?>>

// CHECK-128B-LABEL: func @transfer_write_dims_mismatch_contiguous_unit_dims(
// CHECK-128B: memref.collapse_shape
@@ -451,13 +454,13 @@ func.func @transfer_write_dims_mismatch_contiguous_non_unit_dims(
// CHECK-LABEL: func.func @transfer_write_dims_mismatch_contiguous_non_unit_dims
// CHECK-SAME: %[[MEM:.+]]: memref<5x4x3x2xi8, {{.+}}>,
// CHECK-SAME: %[[VEC:.+]]: vector<2x2xi8>
- // CHECK: %[[C0:.+]] = arith.constant 0 : index
- // CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
- // CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<120xi8, {{.+}}>
- // CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<2x2xi8> to vector<4xi8>
- // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]]] {in_bounds = [true]}
- // CHECK-SAME: : vector<4xi8>, memref<120xi8, {{.+}}>
+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
+ // CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<5x4x3x2xi8, {{.+}}> into memref<5x4x6xi8, {{.+}}>
+ // CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<2x2xi8> to vector<4xi8>
+ // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true]}
+ // CHECK-SAME: : vector<4xi8>, memref<5x4x6xi8, {{.+}}>

// CHECK-128B-LABEL: func @transfer_write_dims_mismatch_contiguous_non_unit_dims(
// CHECK-128B: memref.collapse_shape
@@ -477,16 +480,18 @@ func.func @transfer_write_dims_mismatch_non_zero_indices(
return
}

- // CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+ // CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0] -> (s0 * 6)>

// CHECK-LABEL: func.func @transfer_write_dims_mismatch_non_zero_indices(
// CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
// CHECK-SAME: %[[MEM:.*]]: memref<1x43x4x6xi32>,
// CHECK-SAME: %[[VEC:.*]]: vector<1x2x6xi32>) {
- // CHECK-DAG: %[[IDX:.*]] = affine.apply #[[$ATTR_0]](){{\[}}%[[IDX_1]], %[[IDX_2]]]
- // CHECK-DAG: %[[CS:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0, 1, 2, 3]] : memref<1x43x4x6xi32> into memref<1032xi32>
+ // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+ // CHECK-DAG: %[[IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_2]]]
+ // CHECK-DAG: %[[CS:.*]] = memref.collapse_shape %[[MEM]] {{\[\[}}0], [1], [2, 3]] : memref<1x43x4x6xi32> into memref<1x43x24xi32>
// CHECK: %[[SC:.*]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
- // CHECK: vector.transfer_write %[[SC]], %[[CS]][%[[IDX]]] {in_bounds = [true]} : vector<12xi32>, memref<1032xi32>
+ // CHECK: vector.transfer_write %[[SC]], %[[CS]][%[[C0]], %[[IDX_1]], %[[IDX]]] {in_bounds = [true]} : vector<12xi32>, memref<1x43x24xi32>

// CHECK-128B-LABEL: func @transfer_write_dims_mismatch_non_zero_indices(
// CHECK-128B-NOT: memref.collapse_shape
@@ -534,18 +539,16 @@ func.func @transfer_write_leading_dynamic_dims(
return
}

- // CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 32)>
-
// CHECK-LABEL: func @transfer_write_leading_dynamic_dims
// CHECK-SAME: %[[VEC:.+]]: vector<8x4xi8>, %[[MEM:.+]]: memref<?x?x8x4xi8, {{.+}}>, %[[ARG2:.+]]: index, %[[ARG3:.+]]: index
- // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]] {{\[}}[0], [1, 2, 3]{{\]}}
- // CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?xi8, {{.+}}>
- // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[ARG3]]]
+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
+ // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?x32xi8, {{.+}}>
// CHECK: %[[VEC1D:.+]] = vector.shape_cast %[[VEC]] : vector<8x4xi8> to vector<32xi8>
// CHECK: vector.transfer_write %[[VEC1D]], %[[COLLAPSED]]
- // CHECK-SAME: [%[[ARG2]], %[[COLLAPSED_IDX]]]
- // CHECK-SAME: {in_bounds = [true]}
- // CHECK-SAME: : vector<32xi8>, memref<?x?xi8, {{.+}}>
+ // CHECK-SAME: [%[[ARG2]], %[[ARG3]], %[[C0]]] {in_bounds = [true]}
+ // CHECK-SAME: : vector<32xi8>, memref<?x?x32xi8, {{.+}}>

// CHECK-128B-LABEL: func @transfer_write_leading_dynamic_dims
// CHECK-128B: memref.collapse_shape
@@ -589,7 +592,7 @@ func.func @transfer_write_dynamic_dim_to_flatten(
return
}

- // CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+ // CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 6)>

// CHECK-LABEL: func.func @transfer_write_dynamic_dim_to_flatten
// CHECK-SAME: %[[IDX_1:arg0]]: index
@@ -598,12 +601,12 @@ func.func @transfer_write_dynamic_dim_to_flatten(
// CHECK-SAME: %[[MEM:arg3]]: memref<1x?x4x6xi32>
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
- // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
- // CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?xi32>
- // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
+ // CHECK-SAME{LITERAL}: [[0], [1], [2, 3]]
+ // CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?x24xi32>
+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_2]]]
// CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
- // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[COLLAPSED_IDX]]]
- // CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?xi32>
+ // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[IDX_1]], %[[COLLAPSED_IDX]]]
+ // CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?x24xi32>

// CHECK-128B-LABEL: func @transfer_write_dynamic_dim_to_flatten
// CHECK-128B-NOT: memref.collapse_shape