Commit fec0384

[LV] Optimize VPWidenIntOrFpInductionRecipe for known TC
Optimize the IR generated for a VPWidenIntOrFpInductionRecipe to use the narrowest type necessary when the trip count of the loop is a known constant and the only use of the recipe is the condition used by the vector loop's backedge branch.
1 parent 276e940 commit fec0384

13 files changed: +4515 −154 lines
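
As a rough illustration of the effect, here is a minimal before/after sketch modeled on the pr45679 test updated below (trip count 14, VF 4, UF 1; value names are abbreviated and hypothetical). The widened IV only feeds the header-mask compare, so 8 bits are enough to represent every lane value up to the aligned trip count:

; Before: induction and mask computed in the original i32 IV type.
%vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
%mask = icmp ule <4 x i32> %vec.ind, splat (i32 13)
%vec.ind.next = add <4 x i32> %vec.ind, splat (i32 4)

; After: the IV, its step and the backedge-taken count are narrowed to i8.
%vec.ind = phi <4 x i8> [ <i8 0, i8 1, i8 2, i8 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
%mask = icmp ule <4 x i8> %vec.ind, splat (i8 13)
%vec.ind.next = add <4 x i8> %vec.ind, splat (i8 4)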

llvm/lib/Transforms/Vectorize/VPlan.h

Lines changed: 3 additions & 0 deletions
@@ -1843,6 +1843,9 @@ class VPWidenIntOrFpInductionRecipe : public VPWidenInductionRecipe {
                        VPSlotTracker &SlotTracker) const override;
 #endif
 
+  /// Update the step value of the recipe.
+  void setStepValue(VPValue *V) { setOperand(1, V); }
+
   VPValue *getVFValue() { return getOperand(2); }
   const VPValue *getVFValue() const { return getOperand(2); }

llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp

Lines changed: 90 additions & 9 deletions
@@ -20,6 +20,7 @@
 #include "VPlanPatternMatch.h"
 #include "VPlanUtils.h"
 #include "VPlanVerifier.h"
+#include "llvm/ADT/APInt.h"
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetVector.h"
@@ -29,6 +30,8 @@
 #include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/TypeSize.h"
 
 using namespace llvm;
 
@@ -975,11 +978,74 @@ void VPlanTransforms::simplifyRecipes(VPlan &Plan, Type &CanonicalIVTy) {
   }
 }
 
-void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
-                                         unsigned BestUF,
-                                         PredicatedScalarEvolution &PSE) {
-  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
-  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
+/// Optimize the width of vector induction variables in \p Plan based on a known
+/// constant Trip Count, \p BestVF and \p BestUF.
+static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
+                                                     ElementCount BestVF,
+                                                     unsigned BestUF) {
+  // Only proceed if we have not completely removed the vector region.
+  if (!Plan.getVectorLoopRegion())
+    return false;
+
+  auto *TC = dyn_cast_if_present<ConstantInt>(
+      Plan.getTripCount()->getUnderlyingValue());
+  if (!TC || !BestVF.isFixed())
+    return false;
+
+  // Calculate the widest type required for known TC, VF and UF.
+  auto ComputeBitWidth = [](APInt TC, uint64_t Align) {
+    auto AlignedTC =
+        Align * APIntOps::RoundingUDiv(TC, APInt(TC.getBitWidth(), Align),
+                                       APInt::Rounding::UP);
+    auto MaxVal = AlignedTC - 1;
+    return std::max<unsigned>(PowerOf2Ceil(MaxVal.getActiveBits()), 8);
+  };
+  unsigned NewBitWidth =
+      ComputeBitWidth(TC->getValue(), BestVF.getKnownMinValue() * BestUF);
+
+  LLVMContext &Ctx = Plan.getCanonicalIV()->getScalarType()->getContext();
+  auto *NewIVTy = IntegerType::get(Ctx, NewBitWidth);
+
+  bool MadeChange = false;
+
+  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
+  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
+    auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
+    if (!WideIV || !WideIV->isCanonical() ||
+        WideIV->hasMoreThanOneUniqueUser() ||
+        NewIVTy == WideIV->getScalarType())
+      continue;
+
+    // Currently only handle cases where the single user is a header-mask
+    // comparison with the backedge-taken-count.
+    using namespace VPlanPatternMatch;
+    if (!match(*WideIV->user_begin(),
+               m_Binary<Instruction::ICmp>(
+                   m_Specific(WideIV),
+                   m_Specific(Plan.getOrCreateBackedgeTakenCount()))))
+      continue;
+
+    // Update IV operands and comparison bound to use new narrower type.
+    auto *NewStart = Plan.getOrAddLiveIn(ConstantInt::get(NewIVTy, 0));
+    WideIV->setStartValue(NewStart);
+    auto *NewStep = Plan.getOrAddLiveIn(ConstantInt::get(NewIVTy, 1));
+    WideIV->setStepValue(NewStep);
+
+    auto *NewBTC = new VPWidenCastRecipe(
+        Instruction::Trunc, Plan.getOrCreateBackedgeTakenCount(), NewIVTy);
+    Plan.getVectorPreheader()->appendRecipe(NewBTC);
+    auto *Cmp = dyn_cast<VPInstruction>(*WideIV->user_begin());
+    Cmp->setOperand(1, NewBTC);
+
+    MadeChange = true;
+  }
+
+  return MadeChange;
+}
+
+bool VPlanTransforms::simplifyBranchConditionForVFAndUF(
+    VPlan &Plan, ElementCount BestVF, unsigned BestUF,
+    PredicatedScalarEvolution &PSE) {
   VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
   VPBasicBlock *ExitingVPBB = VectorRegion->getExitingBasicBlock();
   auto *Term = &ExitingVPBB->back();
@@ -992,7 +1058,7 @@ void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
   if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
       !match(Term,
              m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
-    return;
+    return false;
 
   ScalarEvolution &SE = *PSE.getSE();
   const SCEV *TripCount =
@@ -1003,7 +1069,7 @@ void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
   const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
   if (TripCount->isZero() ||
       !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
-    return;
+    return false;
 
   // The vector loop region only executes once. If possible, completely remove
   // the region, otherwise replace the terminator controlling the latch with
@@ -1042,8 +1108,23 @@ void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
 
   Term->eraseFromParent();
 
-  Plan.setVF(BestVF);
-  Plan.setUF(BestUF);
+  return true;
+}
+
+void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
+                                         unsigned BestUF,
+                                         PredicatedScalarEvolution &PSE) {
+  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
+  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
+
+  bool MadeChange =
+      simplifyBranchConditionForVFAndUF(Plan, BestVF, BestUF, PSE);
+  MadeChange |= optimizeVectorInductionWidthForTCAndVFUF(Plan, BestVF, BestUF);
+
+  if (MadeChange) {
+    Plan.setVF(BestVF);
+    Plan.setUF(BestUF);
+  }
   // TODO: Further simplifications are possible
   //      1. Replace inductions with constants.
   //      2. Replace vector loop region with VPBasicBlock.
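
As a worked example of ComputeBitWidth, take the latch_branch_cost test updated below: TC = 100 and VF x UF = 8 x 1, so AlignedTC = 8 * ceil(100 / 8) = 104 and MaxVal = 103. 103 has 7 active bits, PowerOf2Ceil(7) = 8, and the minimum width of 8 keeps NewBitWidth at 8. The induction is therefore rewritten over i8 and the backedge-taken count (99) is truncated to i8 in the vector preheader, which matches the <8 x i8> ... splat (i8 99) patterns in the AArch64 test checks.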

llvm/lib/Transforms/Vectorize/VPlanTransforms.h

Lines changed: 7 additions & 0 deletions
@@ -82,6 +82,13 @@ struct VPlanTransforms {
                                  unsigned BestUF,
                                  PredicatedScalarEvolution &PSE);
 
+  /// Try to simplify the branch condition of \p Plan. This may restrict the
+  /// resulting plan to \p BestVF and \p BestUF.
+  static bool simplifyBranchConditionForVFAndUF(VPlan &Plan,
+                                                ElementCount BestVF,
+                                                unsigned BestUF,
+                                                PredicatedScalarEvolution &PSE);
+
   /// Apply VPlan-to-VPlan optimizations to \p Plan, including induction recipe
   /// optimizations, dead recipe removal, replicate region optimizations and
   /// block merging.

llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll

Lines changed: 9 additions & 9 deletions
@@ -389,8 +389,8 @@ define void @latch_branch_cost(ptr %dst) {
 ; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
 ; PRED: vector.body:
 ; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; PRED-NEXT: [[TMP0:%.*]] = icmp ule <8 x i64> [[VEC_IND]], splat (i64 99)
+; PRED-NEXT: [[VEC_IND:%.*]] = phi <8 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
+; PRED-NEXT: [[TMP0:%.*]] = icmp ule <8 x i8> [[VEC_IND]], splat (i8 99)
 ; PRED-NEXT: [[TMP1:%.*]] = extractelement <8 x i1> [[TMP0]], i32 0
 ; PRED-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; PRED: pred.store.if:
@@ -456,7 +456,7 @@ define void @latch_branch_cost(ptr %dst) {
 ; PRED-NEXT: br label [[PRED_STORE_CONTINUE6]]
 ; PRED: pred.store.continue14:
 ; PRED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8)
+; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
 ; PRED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 104
 ; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; PRED: middle.block:
@@ -903,9 +903,9 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
 ; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
 ; DEFAULT: vector.body:
 ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE14:%.*]] ]
-; DEFAULT-NEXT: [[VEC_IND:%.*]] = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ]
+; DEFAULT-NEXT: [[VEC_IND:%.*]] = phi <8 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ]
 ; DEFAULT-NEXT: [[TMP0:%.*]] = trunc i64 [[INDEX]] to i8
-; DEFAULT-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IND]], splat (i64 6)
+; DEFAULT-NEXT: [[TMP1:%.*]] = icmp ule <8 x i8> [[VEC_IND]], splat (i8 6)
 ; DEFAULT-NEXT: [[TMP2:%.*]] = extractelement <8 x i1> [[TMP1]], i32 0
 ; DEFAULT-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; DEFAULT: pred.store.if:
@@ -978,7 +978,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
 ; DEFAULT-NEXT: store i8 [[TMP33]], ptr [[TMP32]], align 1
 ; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE14]]
 ; DEFAULT: pred.store.continue14:
-; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8)
+; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
 ; DEFAULT-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
 ; DEFAULT: middle.block:
@@ -1005,9 +1005,9 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
 ; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
 ; PRED: vector.body:
 ; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE14:%.*]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ]
+; PRED-NEXT: [[VEC_IND:%.*]] = phi <8 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE14]] ]
 ; PRED-NEXT: [[TMP0:%.*]] = trunc i64 [[INDEX]] to i8
-; PRED-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IND]], splat (i64 6)
+; PRED-NEXT: [[TMP1:%.*]] = icmp ule <8 x i8> [[VEC_IND]], splat (i8 6)
 ; PRED-NEXT: [[TMP2:%.*]] = extractelement <8 x i1> [[TMP1]], i32 0
 ; PRED-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; PRED: pred.store.if:
@@ -1080,7 +1080,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
 ; PRED-NEXT: store i8 [[TMP33]], ptr [[TMP32]], align 1
 ; PRED-NEXT: br label [[PRED_STORE_CONTINUE14]]
 ; PRED: pred.store.continue14:
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8)
+; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
 ; PRED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
 ; PRED-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
 ; PRED: middle.block:

llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll

Lines changed: 3 additions & 3 deletions
@@ -19,10 +19,10 @@ define void @func_21() {
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
 ; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[PRED_STORE_CONTINUE4]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE4]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i8> [ <i8 0, i8 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE4]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ule <2 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ule <2 x i8> [[VEC_IND]], splat (i8 4)
 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
 ; CHECK-NEXT: br i1 [[TMP3]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
 ; CHECK: pred.load.if:
@@ -59,7 +59,7 @@ define void @func_21() {
 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
 ; CHECK: pred.store.continue4:
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i8> [[VEC_IND]], splat (i8 2)
 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 6
 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:

llvm/test/Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll

Lines changed: 3 additions & 3 deletions
@@ -86,8 +86,8 @@ attributes #0 = { "target-cpu"="knl" }
 ; FORCE-NEXT: br label [[VECTOR_BODY:%.*]]
 ; FORCE: vector.body:
 ; FORCE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
-; FORCE-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE4]] ]
-; FORCE-NEXT: [[TMP2:%.*]] = icmp ule <2 x i32> [[VEC_IND]], splat (i32 2)
+; FORCE-NEXT: [[VEC_IND:%.*]] = phi <2 x i8> [ <i8 0, i8 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE4]] ]
+; FORCE-NEXT: [[TMP2:%.*]] = icmp ule <2 x i8> [[VEC_IND]], splat (i8 2)
 ; FORCE-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
 ; FORCE-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; FORCE: pred.store.if:
@@ -103,7 +103,7 @@ attributes #0 = { "target-cpu"="knl" }
 ; FORCE-NEXT: br label [[PRED_STORE_CONTINUE4]]
 ; FORCE: pred.store.continue2:
 ; FORCE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
-; FORCE-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
+; FORCE-NEXT: [[VEC_IND_NEXT]] = add <2 x i8> [[VEC_IND]], splat (i8 2)
 ; FORCE-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], 4
 ; FORCE-NEXT: br i1 [[TMP15]], label {{%.*}}, label [[VECTOR_BODY]]
 ;

llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll

Lines changed: 6 additions & 6 deletions
@@ -18,8 +18,8 @@ define void @pr45679(ptr %A) optsize {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i32> [[VEC_IND]], splat (i32 13)
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i8> [ <i8 0, i8 1, i8 2, i8 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i8> [[VEC_IND]], splat (i8 13)
 ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0
 ; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; CHECK: pred.store.if:
@@ -53,7 +53,7 @@ define void @pr45679(ptr %A) optsize {
 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
 ; CHECK: pred.store.continue6:
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i8> [[VEC_IND]], splat (i8 4)
 ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
@@ -213,8 +213,8 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IND]], splat (i64 13)
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i8> [ <i8 0, i8 1, i8 2, i8 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i8> [[VEC_IND]], splat (i8 13)
 ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0
 ; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; CHECK: pred.store.if:
@@ -252,7 +252,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
 ; CHECK: pred.store.continue6:
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i8> [[VEC_IND]], splat (i8 4)
 ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
