Skip to content

Commit 0338c55

Browse files
authored
[LV, VPlan] Check if plan is compatible to EVL transform (#92092)
The transform updates all users of inductions to work based on EVL, instead of the VF directly. At the moment, widened inductions cannot be updated, so bail out if the plan contains any. This patch introduces a check before applying the EVL transform: if any recipes in the loop rely on RuntimeVF, the plan is discarded.
1 parent 9b31cc7 commit 0338c55

File tree

5 files changed

+33
-174
lines changed

5 files changed

+33
-174
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8536,8 +8536,10 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
85368536
*Plan, CM.getMinimalBitwidths(), PSE.getSE()->getContext());
85378537
VPlanTransforms::optimize(*Plan, *PSE.getSE());
85388538
// TODO: try to put it close to addActiveLaneMask().
8539-
if (CM.foldTailWithEVL())
8540-
VPlanTransforms::addExplicitVectorLength(*Plan);
8539+
// Discard the plan if it is not EVL-compatible
8540+
if (CM.foldTailWithEVL() &&
8541+
!VPlanTransforms::tryAddExplicitVectorLength(*Plan))
8542+
break;
85418543
assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
85428544
VPlans.push_back(std::move(Plan));
85438545
}

llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1318,8 +1318,16 @@ void VPlanTransforms::addActiveLaneMask(
13181318
/// %NextEVLIV = add IVSize (cast i32 %VPEVVL to IVSize), %EVLPhi
13191319
/// ...
13201320
///
1321-
void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) {
1321+
bool VPlanTransforms::tryAddExplicitVectorLength(VPlan &Plan) {
13221322
VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1323+
// The transform updates all users of inductions to work based on EVL, instead
1324+
// of the VF directly. At the moment, widened inductions cannot be updated, so
1325+
// bail out if the plan contains any.
1326+
if (any_of(Header->phis(), [](VPRecipeBase &Phi) {
1327+
return (isa<VPWidenIntOrFpInductionRecipe>(&Phi) ||
1328+
isa<VPWidenPointerInductionRecipe>(&Phi));
1329+
}))
1330+
return false;
13231331
auto *CanonicalIVPHI = Plan.getCanonicalIV();
13241332
VPValue *StartV = CanonicalIVPHI->getStartValue();
13251333

@@ -1377,6 +1385,7 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) {
13771385
CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
13781386
// TODO: support unroll factor > 1.
13791387
Plan.setUF(1);
1388+
return true;
13801389
}
13811390

13821391
void VPlanTransforms::dropPoisonGeneratingRecipes(

llvm/lib/Transforms/Vectorize/VPlanTransforms.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,8 @@ struct VPlanTransforms {
104104
/// VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe.
105105
/// VPCanonicalIVPHIRecipe is only used to control the loop after
106106
/// this transformation.
107-
static void addExplicitVectorLength(VPlan &Plan);
107+
/// \returns true if the transformation succeeds, or false if it doesn't.
108+
static bool tryAddExplicitVectorLength(VPlan &Plan);
108109
};
109110

110111
} // namespace llvm

llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll

Lines changed: 8 additions & 113 deletions
Original file line numberDiff line numberDiff line change
@@ -8,60 +8,14 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
88
; CHECK-LABEL: define void @test_wide_integer_induction(
99
; CHECK-SAME: ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
1010
; CHECK-NEXT: entry:
11-
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
12-
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
13-
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2
14-
; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
15-
; CHECK-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
16-
; CHECK: vector.ph:
17-
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
18-
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
19-
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
20-
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2
21-
; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
22-
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
23-
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
24-
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
25-
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
26-
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
27-
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
28-
; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP11]], zeroinitializer
29-
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 2 x i64> [[TMP12]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
30-
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP13]]
31-
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
32-
; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
33-
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 1, [[TMP15]]
34-
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP16]], i64 0
35-
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
36-
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
37-
; CHECK: vector.body:
38-
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
39-
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
40-
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
41-
; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
42-
; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP17]], i32 2, i1 true)
43-
; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[EVL_BASED_IV]], 0
44-
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP19]]
45-
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i32 0
46-
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VEC_IND]], ptr align 8 [[TMP21]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
47-
; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP18]] to i64
48-
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP22]], [[EVL_BASED_IV]]
49-
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
50-
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
51-
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
52-
; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
53-
; CHECK: middle.block:
54-
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
55-
; CHECK: scalar.ph:
56-
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
5711
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
5812
; CHECK: for.body:
59-
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
13+
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
6014
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
6115
; CHECK-NEXT: store i64 [[IV]], ptr [[ARRAYIDX]], align 8
6216
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
6317
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
64-
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
18+
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
6519
; CHECK: for.cond.cleanup:
6620
; CHECK-NEXT: ret void
6721
;
@@ -85,67 +39,16 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
8539
; CHECK-LABEL: define void @test_wide_ptr_induction(
8640
; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
8741
; CHECK-NEXT: entry:
88-
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]]
89-
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
90-
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2
91-
; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
92-
; CHECK-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
93-
; CHECK: vector.ph:
94-
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
95-
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
96-
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
97-
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2
98-
; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
99-
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
100-
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
101-
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
102-
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 8
103-
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP9]]
104-
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
105-
; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2
10642
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
107-
; CHECK: vector.body:
108-
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[B]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
109-
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
110-
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
111-
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
112-
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 2
113-
; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 1
114-
; CHECK-NEXT: [[TMP15:%.*]] = mul i64 8, [[TMP14]]
115-
; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP13]], 0
116-
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP16]], i64 0
117-
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
118-
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
119-
; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP17]]
120-
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP18]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
121-
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
122-
; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
123-
; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP20]], i32 2, i1 true)
124-
; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[EVL_BASED_IV]], 0
125-
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP22]]
126-
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds ptr, ptr [[TMP23]], i32 0
127-
; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[TMP19]], ptr align 8 [[TMP24]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP21]])
128-
; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP21]] to i64
129-
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[EVL_BASED_IV]]
130-
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
131-
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP15]]
132-
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
133-
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
134-
; CHECK: middle.block:
135-
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
136-
; CHECK: scalar.ph:
137-
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
138-
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
139-
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
14043
; CHECK: for.body:
141-
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
142-
; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
44+
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
45+
; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[VECTOR_BODY]] ], [ [[B]], [[VECTOR_PH]] ]
14346
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8
144-
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
47+
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
14548
; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8
146-
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
147-
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
148-
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
49+
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw nsw i64 [[EVL_BASED_IV]], 1
50+
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
51+
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
14952
; CHECK: for.cond.cleanup:
15053
; CHECK-NEXT: ret void
15154
;
@@ -165,11 +68,3 @@ for.body:
16568
for.cond.cleanup:
16669
ret void
16770
}
168-
;.
169-
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
170-
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
171-
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
172-
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
173-
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
174-
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
175-
;.

llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll

Lines changed: 9 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -12,66 +12,18 @@
1212
define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) {
1313
; IF-EVL-LABEL: @gather_scatter(
1414
; IF-EVL-NEXT: entry:
15-
; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N:%.*]]
16-
; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
17-
; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2
18-
; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
19-
; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
20-
; IF-EVL: vector.ph:
21-
; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
22-
; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
23-
; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
24-
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2
25-
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
26-
; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
27-
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
28-
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
29-
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
30-
; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
31-
; IF-EVL-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
32-
; IF-EVL-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP11]], zeroinitializer
33-
; IF-EVL-NEXT: [[TMP13:%.*]] = mul <vscale x 2 x i64> [[TMP12]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
34-
; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP13]]
35-
; IF-EVL-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
36-
; IF-EVL-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
37-
; IF-EVL-NEXT: [[TMP16:%.*]] = mul i64 1, [[TMP15]]
38-
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP16]], i64 0
39-
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
40-
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
41-
; IF-EVL: vector.body:
42-
; IF-EVL-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
43-
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
44-
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
45-
; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
46-
; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP17]], i32 2, i1 true)
47-
; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], <vscale x 2 x i64> [[VEC_IND]]
48-
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP20]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
49-
; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
50-
; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP21]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
51-
; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
52-
; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> align 4 [[TMP22]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
53-
; IF-EVL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP18]] to i64
54-
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
55-
; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], [[TMP10]]
56-
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
57-
; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
58-
; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
59-
; IF-EVL: middle.block:
60-
; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
61-
; IF-EVL: scalar.ph:
62-
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
6315
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
6416
; IF-EVL: for.body:
65-
; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
66-
; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV]]
67-
; IF-EVL-NEXT: [[TMP25:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
68-
; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP25]]
69-
; IF-EVL-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
70-
; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP25]]
71-
; IF-EVL-NEXT: store float [[TMP26]], ptr [[ARRAYIDX7]], align 4
17+
; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
18+
; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]]
19+
; IF-EVL-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
20+
; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]]
21+
; IF-EVL-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
22+
; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]]
23+
; IF-EVL-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4
7224
; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
73-
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
74-
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
25+
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
26+
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
7527
; IF-EVL: for.end:
7628
; IF-EVL-NEXT: ret void
7729
;

0 commit comments

Comments
 (0)