@@ -153,17 +153,17 @@ func.func @transfer_read_leading_dynamic_dims(
}

// CHECK-LABEL: func @transfer_read_leading_dynamic_dims
- // CHECK-SAME: %[[MEM:.+]]: memref<?x?x8x4xi8, {{.+}}>, %[[ARG1:.+]]: index, %[[ARG2:.+]]: index
+ // CHECK-SAME: %[[MEM:.+]]: memref<?x?x8x4xi8, {{.+}}>, %[[IDX_1:.+]]: index, %[[IDX_2:.+]]: index
// CHECK: %[[C0_I8:.+]] = arith.constant 0 : i8
// CHECK: %[[C0:.+]] = arith.constant 0 : index
// CHECK: %[[COLLAPSED:.+]] = memref.collapse_shape %[[MEM]] {{\[}}[0], [1], [2, 3]{{\]}}
// CHECK-SAME: : memref<?x?x8x4xi8, {{.+}}> into memref<?x?x32xi8, {{.+}}>
// CHECK: %[[VEC1D:.+]] = vector.transfer_read %[[COLLAPSED]]
- // CHECK-SAME: [%[[ARG1]], %[[ARG2]], %[[C0]]], %[[C0_I8]]
+ // CHECK-SAME: [%[[IDX_1]], %[[IDX_2]], %[[C0]]], %[[C0_I8]]
// CHECK-SAME: {in_bounds = [true]}
// CHECK-SAME: : memref<?x?x32xi8, {{.+}}>, vector<32xi8>
- // CHECK: %[[VEC2D:.+]] = vector.shape_cast %[[VEC1D]] : vector<32xi8> to vector<8x4xi8>
- // CHECK: return %[[VEC2D]] : vector<8x4xi8>
+ // CHECK: %[[RES:.+]] = vector.shape_cast %[[VEC1D]] : vector<32xi8> to vector<8x4xi8>
+ // CHECK: return %[[RES]] : vector<8x4xi8>

// CHECK-128B-LABEL: func @transfer_read_leading_dynamic_dims
// CHECK-128B: memref.collapse_shape
@@ -179,10 +179,10 @@ func.func @negative_transfer_read_dynamic_dim_to_flatten(

  %c0 = arith.constant 0 : index
  %c0_i32 = arith.constant 0 : i32
-   %v = vector.transfer_read %mem[%c0, %idx_1, %idx_2, %c0], %c0_i32 {
+   %res = vector.transfer_read %mem[%c0, %idx_1, %idx_2, %c0], %c0_i32 {
    in_bounds = [true, true, true]
  } : memref<1x?x4x6xi32>, vector<1x2x6xi32>
-   return %v : vector<1x2x6xi32>
+   return %res : vector<1x2x6xi32>
}

// CHECK-LABEL: func.func @negative_transfer_read_dynamic_dim_to_flatten
@@ -202,9 +202,9 @@ func.func @transfer_read_dims_mismatch_non_contiguous_slice(

  %c0 = arith.constant 0 : index
  %cst = arith.constant 0 : i8
-   %v = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst :
+   %res = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst :
    memref<5x4x3x2xi8>, vector<2x1x2x2xi8>
-   return %v : vector<2x1x2x2xi8>
+   return %res : vector<2x1x2x2xi8>
}

// CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_contiguous_slice(
@@ -220,8 +220,8 @@ func.func @transfer_read_0d(
    %mem : memref<i8>) -> vector<i8> {

  %cst = arith.constant 0 : i8
-   %0 = vector.transfer_read %mem[], %cst : memref<i8>, vector<i8>
-   return %0 : vector<i8>
+   %res = vector.transfer_read %mem[], %cst : memref<i8>, vector<i8>
+   return %res : vector<i8>
}

// CHECK-LABEL: func.func @transfer_read_0d
@@ -241,9 +241,9 @@ func.func @transfer_read_non_contiguous_src(

  %c0 = arith.constant 0 : index
  %cst = arith.constant 0 : i8
-   %v = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst :
+   %res = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst :
    memref<5x4x3x2xi8, strided<[24, 8, 2, 1], offset: ?>>, vector<5x4x3x2xi8>
-   return %v : vector<5x4x3x2xi8>
+   return %res : vector<5x4x3x2xi8>
}

// CHECK-LABEL: func.func @transfer_read_non_contiguous_src
@@ -507,9 +507,9 @@ func.func @transfer_write_non_contiguous_src(
/// TODO: Move to a dedicated file - there's no "flattening" in the following tests
///----------------------------------------------------------------------------------------

- func.func @fold_unit_dim_add_basic(%vec_0 : vector<1x8xi32>) -> vector<1x8xi32> {
-   %add = arith.addi %vec_0, %vec_0 : vector<1x8xi32>
-   return %add : vector<1x8xi32>
+ func.func @fold_unit_dim_add_basic(%vec : vector<1x8xi32>) -> vector<1x8xi32> {
+   %res = arith.addi %vec, %vec : vector<1x8xi32>
+   return %res : vector<1x8xi32>
}

// CHECK-LABEL: func.func @fold_unit_dim_add_basic(
// CHECK-SAME: %[[VAL_0:.*]]: vector<1x8xi32>) -> vector<1x8xi32> {
@@ -524,9 +524,9 @@ func.func @fold_unit_dim_add_basic(%vec_0 : vector<1x8xi32>) -> vector<1x8xi32>

// -----

- func.func @fold_unit_dim_add_leading_and_trailing(%vec_0 : vector<1x8x1xi32>) -> vector<1x8x1xi32> {
-   %add = arith.addi %vec_0, %vec_0 : vector<1x8x1xi32>
-   return %add : vector<1x8x1xi32>
+ func.func @fold_unit_dim_add_leading_and_trailing(%vec : vector<1x8x1xi32>) -> vector<1x8x1xi32> {
+   %res = arith.addi %vec, %vec : vector<1x8x1xi32>
+   return %res : vector<1x8x1xi32>
}

// CHECK-LABEL: func.func @fold_unit_dim_add_leading_and_trailing(
// CHECK-SAME: %[[VAL_0:.*]]: vector<1x8x1xi32>) -> vector<1x8x1xi32> {
@@ -583,8 +583,8 @@ func.func @fold_unit_dim_mulf(%vec_0 : vector<8x[2]x1xf32>,

// -----

- func.func @fold_unit_dim_sitofp(%vec_0 : vector<8x[2]x1xi8>) -> vector<8x[2]xf32> {
-   %sc_vec_0 = vector.shape_cast %vec_0 : vector<8x[2]x1xi8> to vector<1x8x[2]xi8>
+ func.func @fold_unit_dim_sitofp(%vec : vector<8x[2]x1xi8>) -> vector<8x[2]xf32> {
+   %sc_vec_0 = vector.shape_cast %vec : vector<8x[2]x1xi8> to vector<1x8x[2]xi8>
  %add = arith.sitofp %sc_vec_0 : vector<1x8x[2]xi8> to vector<1x8x[2]xf32>
  %res = vector.shape_cast %add : vector<1x8x[2]xf32> to vector<8x[2]xf32>
  return %res : vector<8x[2]xf32>
@@ -628,7 +628,7 @@ func.func @fold_unit_dims_entirely(%vec_0 : vector<8xi32>,
// -----

func.func @fold_inner_unit_dim(%vec_0 : vector<8x1x3xf128>,
-   %vec_1 : vector<1x8x3xf128>) -> vector<8x3xf128> {
+                                %vec_1 : vector<1x8x3xf128>) -> vector<8x3xf128> {
  %sc_vec_1 = vector.shape_cast %vec_1 : vector<1x8x3xf128> to vector<8x1x3xf128>
  %mul = arith.mulf %vec_0, %sc_vec_1 : vector<8x1x3xf128>
  %res = vector.shape_cast %mul : vector<8x1x3xf128> to vector<8x3xf128>
@@ -646,7 +646,7 @@ func.func @fold_inner_unit_dim(%vec_0 : vector<8x1x3xf128>,
// -----

func.func @fold_inner_unit_dim_scalable(%vec_0 : vector<8x1x[1]x3xf128>,
-   %vec_1 : vector<1x8x[1]x3xf128>) -> vector<8x[1]x3xf128> {
+                                         %vec_1 : vector<1x8x[1]x3xf128>) -> vector<8x[1]x3xf128> {
  %sc_vec_1 = vector.shape_cast %vec_1 : vector<1x8x[1]x3xf128> to vector<8x1x[1]x3xf128>
  %mul = arith.mulf %vec_0, %sc_vec_1 : vector<8x1x[1]x3xf128>
  %res = vector.shape_cast %mul : vector<8x1x[1]x3xf128> to vector<8x[1]x3xf128>
@@ -663,8 +663,8 @@ func.func @fold_inner_unit_dim_scalable(%vec_0 : vector<8x1x[1]x3xf128>,

// -----

- func.func @fold_all_unit_dims(%vec_0 : vector<1x1xf32>) -> vector<1xf32> {
-   %0 = arith.mulf %vec_0, %vec_0 : vector<1x1xf32>
+ func.func @fold_all_unit_dims(%vec : vector<1x1xf32>) -> vector<1xf32> {
+   %0 = arith.mulf %vec, %vec : vector<1x1xf32>
  %res = vector.shape_cast %0 : vector<1x1xf32> to vector<1xf32>
  return %res : vector<1xf32>
}
@@ -682,9 +682,9 @@ func.func @negative_out_of_bound_transfer_read(
    %mem : memref<?x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>) -> vector<5x4x3x2xi8> {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0 : i8
-   %v = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst {in_bounds = [false, true, true, true]} :
+   %res = vector.transfer_read %mem[%c0, %c0, %c0, %c0], %cst {in_bounds = [false, true, true, true]} :
    memref<?x4x3x2xi8, strided<[24, 6, 2, 1], offset: ?>>, vector<5x4x3x2xi8>
-   return %v : vector<5x4x3x2xi8>
+   return %res : vector<5x4x3x2xi8>
}
// CHECK: func.func @negative_out_of_bound_transfer_read
// CHECK-NOT: memref.collapse_shape