@@ -1,56 +1,63 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -p loop-vectorize -S %s | FileCheck %s
+; RUN: opt -p loop-vectorize -scalable-vectorization=on -force-vector-width=1 -S %s | FileCheck %s
 
 target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
 target triple = "riscv64-unknown-linux-gnu"
 
-; Make sure we do not pick <vscale x 1 x i64> as VF for a loop with a
-; first-order recurrence.
 define i64 @pr97452_scalable_vf1_for(ptr %src, ptr noalias %dst) #0 {
 ; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for(
 ; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 23, [[TMP0]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1
+; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]]
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[WIDE_LOAD1]] = load <4 x i64>, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[WIDE_LOAD]], <4 x i64> [[WIDE_LOAD1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 1 x i64> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD]] = load <vscale x 1 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1)
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 4
-; CHECK-NEXT: store <4 x i64> [[TMP3]], ptr [[TMP9]], align 8
-; CHECK-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: store <vscale x 1 x i64> [[TMP7]], ptr [[TMP9]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 2
-; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 3
-; CHECK-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <vscale x 1 x i64> [[TMP7]], i32 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 1 x i64> [[WIDE_LOAD]], i32 [[TMP15]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 23, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
 ; CHECK: [[SCALAR_PH]]:
 ; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
 ; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
-; CHECK-NEXT: [[L]] = load i64, ptr [[GEP]], align 8
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[L]] = load i64, ptr [[GEP_SRC]], align 8
 ; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
 ; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP_DST]], align 8
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22
 ; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: ret i64 [[RES]]
 ;
 entry:
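
The updated checks exercise first-order-recurrence handling at a scalable VF of 1: the previous iteration's [[WIDE_LOAD]] is carried across the back-edge in [[VECTOR_RECUR]] and rotated into position with llvm.vector.splice at offset -1. As a minimal standalone sketch of that idiom (the function name @for_splice is hypothetical, not part of the test file):

; Sketch only, not taken from the test: for offset -1, the splice yields
; <prev[vl-1], cur[0], ..., cur[vl-2]>, so each lane of the result holds the
; recurrence value from the preceding scalar iteration.
define <vscale x 1 x i64> @for_splice(<vscale x 1 x i64> %prev, <vscale x 1 x i64> %cur) {
  %for = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %prev, <vscale x 1 x i64> %cur, i32 -1)
  ret <vscale x 1 x i64> %for
}
declare <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i32 immarg)

With the test's trip count of 23, the vector loop covers N_VEC = 23 - (23 urem vscale) scalar iterations stepping by vscale per trip (e.g. 20 iterations when vscale is 4), and the scalar loop finishes the remainder from [[BC_RESUME_VAL]]; if vscale exceeds 23, the minimum-iterations check skips the vector loop entirely.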