@@ -113,14 +113,14 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
113
113
// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
114
114
115
115
// CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_zero_indices(
116
- // CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
117
- // CHECK-SAME: %[[MEM:.*]]: memref<1x43x4x6xi32>
118
- // CHECK: %[[C_0:.*]] = arith.constant 0 : i32
119
- // CHECK: %[[COLLAPSED_IN:.*]] = memref.collapse_shape %[[MEM]]
116
+ // CHECK-SAME: %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index,
117
+ // CHECK-SAME: %[[MEM:.+]]: memref<1x43x4x6xi32>
118
+ // CHECK: %[[C_0:.+]] = arith.constant 0 : i32
119
+ // CHECK: %[[COLLAPSED_IN:.+]] = memref.collapse_shape %[[MEM]]
120
120
// CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
121
121
// CHECK-SAME: : memref<1x43x4x6xi32> into memref<1032xi32>
122
- // CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
123
- // CHECK: %[[READ:.*]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
122
+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
123
+ // CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
124
124
125
125
// CHECK-128B-LABEL: func @transfer_read_dims_mismatch_non_zero_indices(
126
126
// CHECK-128B-NOT: memref.collapse_shape
@@ -228,20 +228,21 @@ func.func @transfer_read_dynamic_dim_to_flatten(
228
228
return %res : vector<1x2x6xi32>
229
229
}
230
230
231
- // CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
231
+ // CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
232
232
233
233
// CHECK-LABEL: func.func @transfer_read_dynamic_dim_to_flatten
234
234
// CHECK-SAME: %[[IDX_1:arg0]]
235
235
// CHECK-SAME: %[[IDX_2:arg1]]
236
236
// CHECK-SAME: %[[MEM:arg2]]
237
- // CHECK: %[[C0_I32:.*]] = arith.constant 0 : i32
238
- // CHECK: %[[COLLAPSED:.*]] = memref.collapse_shape %[[MEM]]
239
- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
240
- // CHECK-SAME: memref<1x?x4x6xi32> into memref<?xi32>
241
- // CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
242
- // CHECK: %[[VEC_1D:.*]] = vector.transfer_read %[[COLLAPSED]][%[[COLLAPSED_IDX]]],
243
- // CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<?xi32>, vector<12xi32>
244
- // CHECK: %[[RESULT:.*]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
237
+ // CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
238
+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
239
+ // CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
240
+ // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
241
+ // CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?xi32>
242
+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
243
+ // CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[COLLAPSED_IDX]]],
244
+ // CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?xi32>, vector<12xi32>
245
+ // CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
245
246
// CHECK: return %[[RESULT]] : vector<1x2x6xi32>
246
247
247
248
@@ -525,21 +526,21 @@ func.func @transfer_write_dynamic_to_flatten(
525
526
return
526
527
}
527
528
528
- // CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
529
+ // CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
529
530
530
531
// CHECK-LABEL: func.func @transfer_write_dynamic_to_flatten
531
532
// CHECK-SAME: %[[IDX_1:arg0]]: index
532
533
// CHECK-SAME: %[[IDX_2:arg1]]: index
533
534
// CHECK-SAME: %[[VEC:arg2]]: vector<1x2x6xi32>
534
535
// CHECK-SAME: %[[MEM:arg3]]: memref<1x?x4x6xi32>
535
-
536
- // CHECK: %[[COLLAPSED_MEM:.*]] = memref.collapse_shape %[[MEM]]
537
- // CHECK-SAME{LITERAL}: [[0, 1, 2, 3]]
538
- // CHECK-SAME: : memref<1x?x4x6xi32> into memref<?xi32>
539
- // CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
540
- // CHECK: %[[VEC_1D:.*]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
541
- // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[COLLAPSED_IDX]]]
542
- // CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<?xi32>
536
+ // CHECK: %[[C0:.+]] = arith.constant 0 : index
537
+ // CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
538
+ // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]]
539
+ // CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?xi32>
540
+ // CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
541
+ // CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
542
+ // CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[COLLAPSED_IDX]]]
543
+ // CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?xi32>
543
544
544
545
// CHECK-128B-LABEL: func @transfer_write_dynamic_to_flatten
545
546
// CHECK-128B-NOT: memref.collapse_shape
0 commit comments