+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: asserts
; RUN: opt -loop-vectorize -scalable-vectorization=on -S -mtriple=aarch64 -mattr=+sve -debug-only=loop-vectorize < %s 2>&1 | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

-; In the test below the PHI instruction:
-; %0 = phi i8* [ %incdec.ptr190, %loop.body ], [ %src, %entry ]
-; has multiple uses, i.e.
-; 1. As a uniform address for the load, and
-; 2. Non-uniform use by the getelementptr + store, which leads to replication.
-
-; CHECK-LABEL: LV: Checking a loop in "phi_multiple_use"
-; CHECK-NOT: LV: Found new scalar instruction: %incdec.ptr190 = getelementptr inbounds i8, i8* %0, i64 1
+; CHECK-LABEL: LV: Checking a loop in "pointer_induction_used_as_vector"
+; CHECK-NOT: LV: Found {{.*}} scalar instruction: %ptr.iv.2.next = getelementptr inbounds i8, i8* %ptr.iv.2, i64 1
;
; CHECK: VPlan 'Initial VPlan for VF={vscale x 2},UF>=1' {
; CHECK-NEXT: loop.body:
-; CHECK-NEXT: WIDEN-INDUCTION %index = phi 0, %index.next
-; CHECK-NEXT: WIDEN-PHI %curchar = phi %curchar.next, %curptr
-; CHECK-NEXT: WIDEN-PHI %0 = phi %incdec.ptr190, %src
-; CHECK-NEXT: WIDEN-GEP Var[Inv] ir<%incdec.ptr190> = getelementptr ir<%0>, ir<1>
-; CHECK-NEXT: WIDEN store ir<%curchar>, ir<%incdec.ptr190>
-; CHECK-NEXT: WIDEN ir<%1> = load ir<%0>
-; CHECK-NEXT: WIDEN ir<%2> = add ir<%1>, ir<1>
-; CHECK-NEXT: WIDEN store ir<%0>, ir<%2>
+; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next
+; CHECK-NEXT: WIDEN-PHI %ptr.iv.1 = phi %start.1, %ptr.iv.1.next
+; CHECK-NEXT: WIDEN-PHI %ptr.iv.2 = phi %start.2, %ptr.iv.2.next
+; CHECK-NEXT: WIDEN-GEP Var[Inv] ir<%ptr.iv.2.next> = getelementptr ir<%ptr.iv.2>, ir<1>
+; CHECK-NEXT: WIDEN store ir<%ptr.iv.1>, ir<%ptr.iv.2.next>
+; CHECK-NEXT: WIDEN ir<%lv> = load ir<%ptr.iv.2>
+; CHECK-NEXT: WIDEN ir<%add> = add ir<%lv>, ir<1>
+; CHECK-NEXT: WIDEN store ir<%ptr.iv.2>, ir<%add>
; CHECK-NEXT: No successors
; CHECK-NEXT: }

-define void @phi_multiple_use(i8** noalias %curptr, i8* noalias %src, i64 %N) #0 {
-; CHECK-LABEL: @phi_multiple_use(
+; In the test below the pointer phi %ptr.iv.2 is used as
+; 1. As a uniform address for the load, and
+; 2. Non-uniform use by the getelementptr which is stored. This requires the
+;    vector value.
+define void @pointer_induction_used_as_vector(i8** noalias %start.1, i8* noalias %start.2, i64 %N) {
+; CHECK-LABEL: @pointer_induction_used_as_vector(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8*, i8** [[START_1:%.*]], i64 [[N_VEC]]
+; CHECK-NEXT: [[IND_END3:%.*]] = getelementptr i8, i8* [[START_2:%.*]], i64 [[N_VEC]]
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %vector.ph ], [ {{.*}}, %vector.body ]
-; CHECK-NEXT: {{.*}} = add i64 [[INDEX1]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX1]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8*, i8** %curptr, i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX1]], i32 0
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8*, i8** [[START_1]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX]], i32 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = add <vscale x 2 x i64> shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 0, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP3]]
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, i8* %src, <vscale x 2 x i64> [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, <vscale x 2 x i8*> [[NEXT_GEP6]], i64 1
-; CHECK: store <vscale x 2 x i8*> [[TMP5]], <vscale x 2 x i8*>*
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <vscale x 2 x i8*> [[NEXT_GEP6]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[TMP6]], i32 0
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to <vscale x 2 x i8>*
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, <vscale x 2 x i8>* [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]],
-; CHECK: store <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8>*
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 0, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP7]]
+; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, i8* [[START_2]], <vscale x 2 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, <vscale x 2 x i8*> [[NEXT_GEP4]], i64 1
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8*, i8** [[NEXT_GEP]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to <vscale x 2 x i8*>*
+; CHECK-NEXT: store <vscale x 2 x i8*> [[TMP9]], <vscale x 2 x i8*>* [[TMP11]], align 8
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 2 x i8*> [[NEXT_GEP4]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[TMP12]], i32 0
+; CHECK-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to <vscale x 2 x i8>*
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, <vscale x 2 x i8>* [[TMP14]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], shufflevector (<vscale x 2 x i8> insertelement (<vscale x 2 x i8> poison, i8 1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP16:%.*]] = bitcast i8* [[TMP13]] to <vscale x 2 x i8>*
+; CHECK-NEXT: store <vscale x 2 x i8> [[TMP15]], <vscale x 2 x i8>* [[TMP16]], align 1
+; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 2
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i8** [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START_1]], [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i8* [ [[IND_END3]], [[MIDDLE_BLOCK]] ], [ [[START_2]], [[ENTRY]] ]
+; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
+; CHECK: loop.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_BODY]] ]
+; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi i8** [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_IV_1_NEXT:%.*]], [[LOOP_BODY]] ]
+; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi i8* [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], [[LOOP_BODY]] ]
+; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr inbounds i8*, i8** [[PTR_IV_1]], i64 1
+; CHECK-NEXT: [[PTR_IV_2_NEXT]] = getelementptr inbounds i8, i8* [[PTR_IV_2]], i64 1
+; CHECK-NEXT: store i8* [[PTR_IV_2_NEXT]], i8** [[PTR_IV_1]], align 8
+; CHECK-NEXT: [[LV:%.*]] = load i8, i8* [[PTR_IV_2]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[LV]], 1
+; CHECK-NEXT: store i8 [[ADD]], i8* [[PTR_IV_2]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
+; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[C]], label [[LOOP_BODY]], label [[EXIT]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+

entry:
  br label %loop.body

loop.body: ; preds = %loop.body, %entry
-  %index = phi i64 [ 0, %entry ], [ %index.next, %loop.body ]
-  %curchar = phi i8** [ %curchar.next, %loop.body ], [ %curptr, %entry ]
-  %0 = phi i8* [ %incdec.ptr190, %loop.body ], [ %src, %entry ]
-  %incdec.ptr190 = getelementptr inbounds i8, i8* %0, i64 1
-  %curchar.next = getelementptr inbounds i8*, i8** %curchar, i64 1
-  store i8* %incdec.ptr190, i8** %curchar, align 8
-  %1 = load i8, i8* %0, align 1
-  %2 = add i8 %1, 1
-  store i8 %2, i8* %0, align 1
-  %index.next = add nuw i64 %index, 1
-  %3 = icmp ne i64 %index.next, %N
-  br i1 %3, label %loop.body, label %exit, !llvm.loop !0
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.body ]
+  %ptr.iv.1 = phi i8** [ %start.1, %entry ], [ %ptr.iv.1.next, %loop.body ]
+  %ptr.iv.2 = phi i8* [ %start.2, %entry ], [ %ptr.iv.2.next, %loop.body ]
+  %ptr.iv.1.next = getelementptr inbounds i8*, i8** %ptr.iv.1, i64 1
+  %ptr.iv.2.next = getelementptr inbounds i8, i8* %ptr.iv.2, i64 1
+  store i8* %ptr.iv.2.next, i8** %ptr.iv.1, align 8
+  %lv = load i8, i8* %ptr.iv.2, align 1
+  %add = add i8 %lv, 1
+  store i8 %add, i8* %ptr.iv.2, align 1
+  %iv.next = add nuw i64 %iv, 1
+  %c = icmp ne i64 %iv.next, %N
+  br i1 %c, label %loop.body, label %exit, !llvm.loop !0

exit: ; preds = %loop.body
  ret void
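For readers who do not work with LLVM IR every day, the scalar loop in @pointer_induction_used_as_vector corresponds roughly to the C sketch below. This is only an illustration written alongside the commit, not part of the test; the names start_1, start_2, N and the iteration variable mirror the IR values, everything else is assumed. It shows the two uses of the second pointer induction variable that the comment in the test describes: its incremented value is itself stored to memory (so the vectorizer needs a full vector of pointers), while its other use is merely the address of a contiguous byte load and store.

#include <stdint.h>

/* Illustrative C equivalent of %loop.body above (not part of the test).
   The IR loop is bottom-tested, so it always executes at least once. */
void pointer_induction_used_as_vector(char **start_1, char *start_2, int64_t N) {
  int64_t iv = 0;
  do {
    /* Non-uniform use: the incremented pointer value itself is stored,
       so after vectorization each lane needs its own pointer (WIDEN-GEP). */
    start_1[iv] = &start_2[iv + 1];
    /* Uniform use: the pointer only serves as the address of a contiguous
       load/store, which a single scalar address per vector iteration can serve. */
    start_2[iv] += 1;
    iv += 1;
  } while (iv != N);
}

With this shape, the CHECK-NOT line above guards against %ptr.iv.2.next being classified as a scalar instruction by the vectorizer, and the VPlan checks expect a WIDEN-GEP for it instead.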