;; a[i] = b[i] + 1.0;

; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
- ; RUN: -scalable-vectorization=on -riscv-v-vector-bits-min=128 -S < %s \
+ ; RUN: -riscv-v-vector-bits-min=128 -S < %s \
; RUN: | FileCheck --check-prefix=RV64 %s

; RUN: opt -passes=loop-vectorize -mtriple=riscv32 -mattr=+v \
- ; RUN: -scalable-vectorization=on -riscv-v-vector-bits-min=128 -S < %s \
+ ; RUN: -riscv-v-vector-bits-min=128 -S < %s \
; RUN: | FileCheck --check-prefix=RV32 %s

+ ; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
+ ; RUN: -riscv-v-vector-bits-min=128 -force-vector-interleave=2 -S < %s \
+ ; RUN: | FileCheck --check-prefix=RV64-UF2 %s
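+ ;; Note: the RV64-UF2 RUN line differs from the RV64 one only in
+ ;; -force-vector-interleave=2, so its checks below expect the vector body
+ ;; to be unrolled twice (two reversed loads, two adds, two reversed
+ ;; stores per iteration) and the index to step by 2 * VF.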
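+ ;; Note: the RV64-UF2 RUN line differs from the RV64 one only in
+ ;; -force-vector-interleave=2, so its checks below expect the vector body
+ ;; to be unrolled twice (two reversed loads, two adds, two reversed
+ ;; stores per iteration) and the index to step by 2 * VF.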
+
define void @vector_reverse_i64(ptr noalias %A, ptr noalias %B, i32 %n) {
; RV64-LABEL: define void @vector_reverse_i64(
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -174,6 +178,105 @@ define void @vector_reverse_i64(ptr noalias %A, ptr noalias %B, i32 %n) {
; RV32-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP4:![0-9]+]]
;
+ ; RV64-UF2-LABEL: define void @vector_reverse_i64(
+ ; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+ ; RV64-UF2-NEXT: [[ENTRY:.*:]]
+ ; RV64-UF2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+ ; RV64-UF2-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+ ; RV64-UF2: [[FOR_BODY_PREHEADER]]:
+ ; RV64-UF2-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+ ; RV64-UF2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+ ; RV64-UF2-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8
+ ; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+ ; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+ ; RV64-UF2: [[VECTOR_SCEVCHECK]]:
+ ; RV64-UF2-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
+ ; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+ ; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+ ; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
+ ; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+ ; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+ ; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
+ ; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
+ ; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
+ ; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
+ ; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
+ ; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+ ; RV64-UF2: [[VECTOR_PH]]:
+ ; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+ ; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 8
+ ; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP12]]
+ ; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+ ; RV64-UF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+ ; RV64-UF2-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 4
+ ; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
+ ; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+ ; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+ ; RV64-UF2-NEXT: [[TMP17:%.*]] = sub i32 [[N]], [[DOTCAST]]
+ ; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; RV64-UF2: [[VECTOR_BODY]]:
+ ; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; RV64-UF2-NEXT: [[DOTCAST1:%.*]] = trunc i64 [[INDEX]] to i32
+ ; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST1]]
+ ; RV64-UF2-NEXT: [[TMP18:%.*]] = add i32 [[OFFSET_IDX]], 0
+ ; RV64-UF2-NEXT: [[TMP19:%.*]] = add nsw i32 [[TMP18]], -1
+ ; RV64-UF2-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
+ ; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP20]]
+ ; RV64-UF2-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP23:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP22]]
+ ; RV64-UF2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP23]]
+ ; RV64-UF2-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP26]]
+ ; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i64 [[TMP27]]
+ ; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP25]], align 4
+ ; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+ ; RV64-UF2-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i32>, ptr [[TMP29]], align 4
+ ; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD2]])
+ ; RV64-UF2-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+ ; RV64-UF2-NEXT: [[TMP31:%.*]] = add <vscale x 4 x i32> [[REVERSE3]], splat (i32 1)
+ ; RV64-UF2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP20]]
+ ; RV64-UF2-NEXT: [[TMP33:%.*]] = mul i64 0, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP34:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 [[TMP33]]
+ ; RV64-UF2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[TMP35]], i64 [[TMP34]]
+ ; RV64-UF2-NEXT: [[TMP37:%.*]] = mul i64 -1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP38:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i64 [[TMP37]]
+ ; RV64-UF2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[TMP39]], i64 [[TMP38]]
+ ; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP30]])
+ ; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP36]], align 4
+ ; RV64-UF2-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP31]])
+ ; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE5]], ptr [[TMP40]], align 4
+ ; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]]
+ ; RV64-UF2-NEXT: [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; RV64-UF2-NEXT: br i1 [[TMP41]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+ ; RV64-UF2: [[MIDDLE_BLOCK]]:
+ ; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+ ; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+ ; RV64-UF2: [[SCALAR_PH]]:
+ ; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP16]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ]
+ ; RV64-UF2-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i32 [ [[TMP17]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ]
+ ; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+ ; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+ ; RV64-UF2-NEXT: br label %[[FOR_COND_CLEANUP]]
+ ; RV64-UF2: [[FOR_COND_CLEANUP]]:
+ ; RV64-UF2-NEXT: ret void
+ ; RV64-UF2: [[FOR_BODY]]:
+ ; RV64-UF2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+ ; RV64-UF2-NEXT: [[I_0_IN8:%.*]] = phi i32 [ [[BC_RESUME_VAL6]], %[[SCALAR_PH]] ], [ [[I_0:%.*]], %[[FOR_BODY]] ]
+ ; RV64-UF2-NEXT: [[I_0]] = add nsw i32 [[I_0_IN8]], -1
+ ; RV64-UF2-NEXT: [[IDXPROM:%.*]] = zext i32 [[I_0]] to i64
+ ; RV64-UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IDXPROM]]
+ ; RV64-UF2-NEXT: [[TMP42:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+ ; RV64-UF2-NEXT: [[ADD9:%.*]] = add i32 [[TMP42]], 1
+ ; RV64-UF2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM]]
+ ; RV64-UF2-NEXT: store i32 [[ADD9]], ptr [[ARRAYIDX3]], align 4
+ ; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[INDVARS_IV]], 1
+ ; RV64-UF2-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+ ; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP4:![0-9]+]]
+ ;
entry:
%cmp7 = icmp sgt i32 %n, 0
br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
@@ -360,6 +463,105 @@ define void @vector_reverse_f32(ptr noalias %A, ptr noalias %B, i32 %n) {
; RV32-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]]
;
+ ; RV64-UF2-LABEL: define void @vector_reverse_f32(
+ ; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+ ; RV64-UF2-NEXT: [[ENTRY:.*:]]
+ ; RV64-UF2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
+ ; RV64-UF2-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
+ ; RV64-UF2: [[FOR_BODY_PREHEADER]]:
+ ; RV64-UF2-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+ ; RV64-UF2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+ ; RV64-UF2-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8
+ ; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+ ; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+ ; RV64-UF2: [[VECTOR_SCEVCHECK]]:
+ ; RV64-UF2-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
+ ; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
+ ; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
+ ; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
+ ; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
+ ; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
+ ; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
+ ; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
+ ; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
+ ; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
+ ; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
+ ; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+ ; RV64-UF2: [[VECTOR_PH]]:
+ ; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+ ; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 8
+ ; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP12]]
+ ; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+ ; RV64-UF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+ ; RV64-UF2-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 4
+ ; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
+ ; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
+ ; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+ ; RV64-UF2-NEXT: [[TMP17:%.*]] = sub i32 [[N]], [[DOTCAST]]
+ ; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; RV64-UF2: [[VECTOR_BODY]]:
+ ; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; RV64-UF2-NEXT: [[DOTCAST1:%.*]] = trunc i64 [[INDEX]] to i32
+ ; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST1]]
+ ; RV64-UF2-NEXT: [[TMP18:%.*]] = add i32 [[OFFSET_IDX]], 0
+ ; RV64-UF2-NEXT: [[TMP19:%.*]] = add nsw i32 [[TMP18]], -1
+ ; RV64-UF2-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
+ ; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP20]]
+ ; RV64-UF2-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP23:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[TMP22]]
+ ; RV64-UF2-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP23]]
+ ; RV64-UF2-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[TMP26]]
+ ; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP27]]
+ ; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP25]], align 4
+ ; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+ ; RV64-UF2-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP29]], align 4
+ ; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD2]])
+ ; RV64-UF2-NEXT: [[TMP30:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+ ; RV64-UF2-NEXT: [[TMP31:%.*]] = fadd <vscale x 4 x float> [[REVERSE3]], splat (float 1.000000e+00)
+ ; RV64-UF2-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP20]]
+ ; RV64-UF2-NEXT: [[TMP33:%.*]] = mul i64 0, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP34:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP32]], i64 [[TMP33]]
+ ; RV64-UF2-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP35]], i64 [[TMP34]]
+ ; RV64-UF2-NEXT: [[TMP37:%.*]] = mul i64 -1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP38:%.*]] = sub i64 1, [[TMP14]]
+ ; RV64-UF2-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP32]], i64 [[TMP37]]
+ ; RV64-UF2-NEXT: [[TMP40:%.*]] = getelementptr inbounds float, ptr [[TMP39]], i64 [[TMP38]]
+ ; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP30]])
+ ; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP36]], align 4
+ ; RV64-UF2-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP31]])
+ ; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE5]], ptr [[TMP40]], align 4
+ ; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]]
+ ; RV64-UF2-NEXT: [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; RV64-UF2-NEXT: br i1 [[TMP41]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+ ; RV64-UF2: [[MIDDLE_BLOCK]]:
+ ; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+ ; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+ ; RV64-UF2: [[SCALAR_PH]]:
+ ; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP16]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ]
+ ; RV64-UF2-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i32 [ [[TMP17]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ]
+ ; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+ ; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+ ; RV64-UF2-NEXT: br label %[[FOR_COND_CLEANUP]]
+ ; RV64-UF2: [[FOR_COND_CLEANUP]]:
+ ; RV64-UF2-NEXT: ret void
+ ; RV64-UF2: [[FOR_BODY]]:
+ ; RV64-UF2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+ ; RV64-UF2-NEXT: [[I_0_IN8:%.*]] = phi i32 [ [[BC_RESUME_VAL6]], %[[SCALAR_PH]] ], [ [[I_0:%.*]], %[[FOR_BODY]] ]
+ ; RV64-UF2-NEXT: [[I_0]] = add nsw i32 [[I_0_IN8]], -1
+ ; RV64-UF2-NEXT: [[IDXPROM:%.*]] = zext i32 [[I_0]] to i64
+ ; RV64-UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IDXPROM]]
+ ; RV64-UF2-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+ ; RV64-UF2-NEXT: [[CONV1:%.*]] = fadd float [[TMP42]], 1.000000e+00
+ ; RV64-UF2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IDXPROM]]
+ ; RV64-UF2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX3]], align 4
+ ; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[INDVARS_IV]], 1
+ ; RV64-UF2-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
+ ; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]]
+ ;
entry:
%cmp7 = icmp sgt i32 %n, 0
br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup
@@ -391,13 +593,6 @@ for.body: ; preds = %for.body.preheader,
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}
- ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
- ; CHECK: [[META1]] = !{!"llvm.loop.mustprogress"}
- ; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
- ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
- ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
- ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
- ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
;.
; RV64: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; RV64: [[META1]] = !{!"llvm.loop.mustprogress"}
@@ -415,3 +610,11 @@ for.body: ; preds = %for.body.preheader,
; RV32: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
; RV32: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META3]], [[META2]]}
;.
+ ; RV64-UF2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
+ ; RV64-UF2: [[META1]] = !{!"llvm.loop.mustprogress"}
+ ; RV64-UF2: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
+ ; RV64-UF2: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+ ; RV64-UF2: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+ ; RV64-UF2: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+ ; RV64-UF2: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+ ;.