@@ -113,14 +113,14 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
 // CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>

 // CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_zero_indices(
-// CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
-// CHECK-SAME: %[[MEM:.*]]: memref<1x43x4x6xi32>
-// CHECK: %[[C_0:.*]] = arith.constant 0 : i32
-// CHECK: %[[COLLAPSED_IN:.*]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME: %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index,
+// CHECK-SAME: %[[MEM:.+]]: memref<1x43x4x6xi32>
+// CHECK: %[[C_0:.+]] = arith.constant 0 : i32
+// CHECK: %[[COLLAPSED_IN:.+]] = memref.collapse_shape %[[MEM]]
 // CHECK-SAME-LITERAL: [[0, 1, 2, 3]]
 // CHECK-SAME: : memref<1x43x4x6xi32> into memref<1032xi32>
-// CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
-// CHECK: %[[READ:.*]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>
+// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
+// CHECK: %[[READ:.+]] = vector.transfer_read %[[COLLAPSED_IN]][%[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1032xi32>, vector<12xi32>

 // CHECK-128B-LABEL: func @transfer_read_dims_mismatch_non_zero_indices(
 // CHECK-128B-NOT: memref.collapse_shape
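Note on #[[$ATTR_0]]: with row-major layout, memref<1x43x4x6xi32> has strides (1032, 24, 6, 1), so an access at [0, s0, s1, 0] linearizes to s0 * 24 + s1 * 6, and the whole buffer collapses to 1 * 43 * 4 * 6 = 1032 elements. Below is a minimal sketch of the kind of input these checks match; the test body itself is elided from this diff, so the exact body is an assumption reconstructed from the CHECK lines.

```mlir
// Hypothetical input, reconstructed from the CHECK lines above.
func.func @transfer_read_dims_mismatch_non_zero_indices(
    %idx_1: index, %idx_2: index,
    %mem: memref<1x43x4x6xi32>) -> vector<1x2x6xi32> {
  %c0 = arith.constant 0 : index
  %pad = arith.constant 0 : i32
  // A contiguous 1x2x6 slice starting at [0, %idx_1, %idx_2, 0]; the
  // flattening pattern rewrites it as a 1-D read of vector<12xi32> at
  // offset %idx_1 * 24 + %idx_2 * 6 into the collapsed memref<1032xi32>.
  %res = vector.transfer_read %mem[%c0, %idx_1, %idx_2, %c0], %pad
      {in_bounds = [true, true, true]}
      : memref<1x43x4x6xi32>, vector<1x2x6xi32>
  return %res : vector<1x2x6xi32>
}
```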
@@ -191,7 +191,7 @@ func.func @transfer_read_leading_dynamic_dims(

 // -----

-// One of the dims to be flattened is dynamic - not supported ATM.
+// One of the dims to be flattened can be dynamic, provided it is the leftmost one.

 func.func @transfer_read_dynamic_dim_to_flatten(
     %idx_1: index,
@@ -206,20 +206,21 @@ func.func @transfer_read_dynamic_dim_to_flatten(
   return %res : vector<1x2x6xi32>
 }

-// CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>

 // CHECK-LABEL: func.func @transfer_read_dynamic_dim_to_flatten
 // CHECK-SAME: %[[IDX_1:arg0]]
 // CHECK-SAME: %[[IDX_2:arg1]]
 // CHECK-SAME: %[[MEM:arg2]]
-// CHECK: %[[C0_I32:.*]] = arith.constant 0 : i32
-// CHECK: %[[COLLAPSED:.*]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME-LITERAL: [[0, 1, 2, 3]]
-// CHECK-SAME: memref<1x?x4x6xi32> into memref<?xi32>
-// CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
-// CHECK: %[[VEC_1D:.*]] = vector.transfer_read %[[COLLAPSED]][%[[COLLAPSED_IDX]]],
-// CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<?xi32>, vector<12xi32>
-// CHECK: %[[RESULT:.*]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>
+// CHECK: %[[C0_I32:.+]] = arith.constant 0 : i32
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME-LITERAL: [[0], [1, 2, 3]]
+// CHECK-SAME: memref<1x?x4x6xi32> into memref<1x?xi32>
+// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
+// CHECK: %[[VEC_1D:.+]] = vector.transfer_read %[[COLLAPSED]][%[[C0]], %[[COLLAPSED_IDX]]],
+// CHECK-SAME: %[[C0_I32]] {in_bounds = [true]} : memref<1x?xi32>, vector<12xi32>
+// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC_1D]] : vector<12xi32> to vector<1x2x6xi32>

 // CHECK: return %[[RESULT]] : vector<1x2x6xi32>

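Why this works with a dynamic dim: in memref<1x?x4x6xi32> the dynamic extent is the leftmost of the dims being flattened, so everything to its right (4 * 6 = 24 elements per step) is static, and the in-group offset s0 * 24 + s1 * 6 remains a compile-time affine expression. The leading unit dim is kept as its own reassociation group, which is why the collapsed type is memref<1x?xi32> and the read gains a leading %[[C0]] index. A sketch of the assumed input follows; the function body is elided from the diff, so this is a reconstruction from the checks.

```mlir
// Hypothetical input, reconstructed from the CHECK lines; the dynamic
// dim (?) is the leftmost dim in the group being flattened.
func.func @transfer_read_dynamic_dim_to_flatten(
    %idx_1: index, %idx_2: index,
    %mem: memref<1x?x4x6xi32>) -> vector<1x2x6xi32> {
  %c0 = arith.constant 0 : index
  %pad = arith.constant 0 : i32
  %res = vector.transfer_read %mem[%c0, %idx_1, %idx_2, %c0], %pad
      {in_bounds = [true, true, true]}
      : memref<1x?x4x6xi32>, vector<1x2x6xi32>
  return %res : vector<1x2x6xi32>
}
```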
@@ -468,7 +469,7 @@ func.func @transfer_write_leading_dynamic_dims(

 // -----

-// One of the dims to be flattened is dynamic - not supported ATM.
+// One of the dims to be flattened can be dynamic, provided it is the leftmost one.

 func.func @transfer_write_dynamic_to_flatten(
     %idx_1: index,
@@ -483,21 +484,21 @@ func.func @transfer_write_dynamic_to_flatten(
   return
 }

-// CHECK: #[[$MAP:.*]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>

 // CHECK-LABEL: func.func @transfer_write_dynamic_to_flatten
 // CHECK-SAME: %[[IDX_1:arg0]]: index
 // CHECK-SAME: %[[IDX_2:arg1]]: index
 // CHECK-SAME: %[[VEC:arg2]]: vector<1x2x6xi32>
 // CHECK-SAME: %[[MEM:arg3]]: memref<1x?x4x6xi32>
-
-// CHECK: %[[COLLAPSED_MEM:.*]] = memref.collapse_shape %[[MEM]]
-// CHECK-SAME-LITERAL: [[0, 1, 2, 3]]
-// CHECK-SAME: : memref<1x?x4x6xi32> into memref<?xi32>
-// CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
-// CHECK: %[[VEC_1D:.*]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
-// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[COLLAPSED_IDX]]]
-// CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<?xi32>
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[COLLAPSED_MEM:.+]] = memref.collapse_shape %[[MEM]]
+// CHECK-SAME-LITERAL: [[0], [1, 2, 3]]
+// CHECK-SAME: : memref<1x?x4x6xi32> into memref<1x?xi32>
+// CHECK: %[[COLLAPSED_IDX:.+]] = affine.apply #[[$MAP]]()[%[[IDX_1]], %[[IDX_2]]]
+// CHECK: %[[VEC_1D:.+]] = vector.shape_cast %[[VEC]] : vector<1x2x6xi32> to vector<12xi32>
+// CHECK: vector.transfer_write %[[VEC_1D]], %[[COLLAPSED_MEM]][%[[C0]], %[[COLLAPSED_IDX]]]
+// CHECK-SAME: {in_bounds = [true]} : vector<12xi32>, memref<1x?xi32>

 // CHECK-128B-LABEL: func @transfer_write_dynamic_to_flatten
 // CHECK-128B-NOT: memref.collapse_shape
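The write case mirrors the read above: the n-D vector is first shape_cast to 1-D, then written through the collapsed memref at the same linearized index %[[COLLAPSED_IDX]], with %[[C0]] indexing the preserved unit dim. A sketch of the assumed input, reconstructed from the argument checks (arg0..arg3); the actual body is elided from this diff.

```mlir
// Hypothetical input for the write test; argument order follows the
// CHECK-SAME lines (%[[IDX_1]], %[[IDX_2]], %[[VEC]], %[[MEM]]).
func.func @transfer_write_dynamic_to_flatten(
    %idx_1: index, %idx_2: index,
    %vec: vector<1x2x6xi32>,
    %mem: memref<1x?x4x6xi32>) {
  %c0 = arith.constant 0 : index
  vector.transfer_write %vec, %mem[%c0, %idx_1, %idx_2, %c0]
      {in_bounds = [true, true, true]}
      : vector<1x2x6xi32>, memref<1x?x4x6xi32>
  return
}
```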