Skip to content

Commit ea429e1

Browse files
[SLP] Do extra analysis in minbitwidth if some checks return false.
The instruction itself can be considered good for minbitwidth casting, even if one of the operand checks returns false. Reviewers: RKSimon. Reviewed by: RKSimon. Pull request: #84363.
1 parent 4b0276d commit ea429e1

File tree

2 files changed

+70
-41
lines changed

2 files changed

+70
-41
lines changed

llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

Lines changed: 59 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -10225,9 +10225,11 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
1022510225
for (const TreeEntry *TE : ForRemoval)
1022610226
Set.erase(TE);
1022710227
}
10228+
bool NeedToRemapValues = false;
1022810229
for (auto *It = UsedTEs.begin(); It != UsedTEs.end();) {
1022910230
if (It->empty()) {
1023010231
UsedTEs.erase(It);
10232+
NeedToRemapValues = true;
1023110233
continue;
1023210234
}
1023310235
std::advance(It, 1);
@@ -10236,6 +10238,19 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
1023610238
Entries.clear();
1023710239
return std::nullopt;
1023810240
}
10241+
// Recalculate the mapping between the values and the entry sets.
10242+
if (NeedToRemapValues) {
10243+
DenseMap<Value *, int> PrevUsedValuesEntry;
10244+
PrevUsedValuesEntry.swap(UsedValuesEntry);
10245+
for (auto [Idx, Set] : enumerate(UsedTEs)) {
10246+
DenseSet<Value *> Values;
10247+
for (const TreeEntry *E : Set)
10248+
Values.insert(E->Scalars.begin(), E->Scalars.end());
10249+
for (const auto &P : PrevUsedValuesEntry)
10250+
if (Values.contains(P.first))
10251+
UsedValuesEntry.try_emplace(P.first, Idx);
10252+
}
10253+
}
1023910254
}
1024010255

1024110256
unsigned VF = 0;
@@ -14001,6 +14016,33 @@ bool BoUpSLP::collectValuesToDemote(
1400114016
};
1400214017
unsigned Start = 0;
1400314018
unsigned End = I->getNumOperands();
14019+
14020+
auto FinalAnalysis = [&](const TreeEntry *ITE = nullptr) {
14021+
if (!IsProfitableToDemote)
14022+
return false;
14023+
return (ITE && ITE->UserTreeIndices.size() > 1) ||
14024+
IsPotentiallyTruncated(I, BitWidth);
14025+
};
14026+
auto ProcessOperands = [&](ArrayRef<Value *> Operands, bool &NeedToExit) {
14027+
NeedToExit = false;
14028+
unsigned InitLevel = MaxDepthLevel;
14029+
for (Value *IncValue : Operands) {
14030+
unsigned Level = InitLevel;
14031+
if (!collectValuesToDemote(IncValue, IsProfitableToDemoteRoot, BitWidth,
14032+
ToDemote, DemotedConsts, Visited, Level,
14033+
IsProfitableToDemote, IsTruncRoot)) {
14034+
if (!IsProfitableToDemote)
14035+
return false;
14036+
NeedToExit = true;
14037+
if (!FinalAnalysis(ITE))
14038+
return false;
14039+
continue;
14040+
}
14041+
MaxDepthLevel = std::max(MaxDepthLevel, Level);
14042+
}
14043+
return true;
14044+
};
14045+
bool NeedToExit = false;
1400414046
switch (I->getOpcode()) {
1400514047

1400614048
// We can always demote truncations and extensions. Since truncations can
@@ -14026,35 +14068,21 @@ bool BoUpSLP::collectValuesToDemote(
1402614068
case Instruction::And:
1402714069
case Instruction::Or:
1402814070
case Instruction::Xor: {
14029-
unsigned Level1, Level2;
14030-
if ((ITE->UserTreeIndices.size() > 1 &&
14031-
!IsPotentiallyTruncated(I, BitWidth)) ||
14032-
!collectValuesToDemote(I->getOperand(0), IsProfitableToDemoteRoot,
14033-
BitWidth, ToDemote, DemotedConsts, Visited,
14034-
Level1, IsProfitableToDemote, IsTruncRoot) ||
14035-
!collectValuesToDemote(I->getOperand(1), IsProfitableToDemoteRoot,
14036-
BitWidth, ToDemote, DemotedConsts, Visited,
14037-
Level2, IsProfitableToDemote, IsTruncRoot))
14071+
if (ITE->UserTreeIndices.size() > 1 && !IsPotentiallyTruncated(I, BitWidth))
14072+
return false;
14073+
if (!ProcessOperands({I->getOperand(0), I->getOperand(1)}, NeedToExit))
1403814074
return false;
14039-
MaxDepthLevel = std::max(Level1, Level2);
1404014075
break;
1404114076
}
1404214077

1404314078
// We can demote selects if we can demote their true and false values.
1404414079
case Instruction::Select: {
14080+
if (ITE->UserTreeIndices.size() > 1 && !IsPotentiallyTruncated(I, BitWidth))
14081+
return false;
1404514082
Start = 1;
14046-
unsigned Level1, Level2;
14047-
SelectInst *SI = cast<SelectInst>(I);
14048-
if ((ITE->UserTreeIndices.size() > 1 &&
14049-
!IsPotentiallyTruncated(I, BitWidth)) ||
14050-
!collectValuesToDemote(SI->getTrueValue(), IsProfitableToDemoteRoot,
14051-
BitWidth, ToDemote, DemotedConsts, Visited,
14052-
Level1, IsProfitableToDemote, IsTruncRoot) ||
14053-
!collectValuesToDemote(SI->getFalseValue(), IsProfitableToDemoteRoot,
14054-
BitWidth, ToDemote, DemotedConsts, Visited,
14055-
Level2, IsProfitableToDemote, IsTruncRoot))
14083+
auto *SI = cast<SelectInst>(I);
14084+
if (!ProcessOperands({SI->getTrueValue(), SI->getFalseValue()}, NeedToExit))
1405614085
return false;
14057-
MaxDepthLevel = std::max(Level1, Level2);
1405814086
break;
1405914087
}
1406014088

@@ -14065,22 +14093,20 @@ bool BoUpSLP::collectValuesToDemote(
1406514093
MaxDepthLevel = 0;
1406614094
if (ITE->UserTreeIndices.size() > 1 && !IsPotentiallyTruncated(I, BitWidth))
1406714095
return false;
14068-
for (Value *IncValue : PN->incoming_values()) {
14069-
unsigned Level;
14070-
if (!collectValuesToDemote(IncValue, IsProfitableToDemoteRoot, BitWidth,
14071-
ToDemote, DemotedConsts, Visited, Level,
14072-
IsProfitableToDemote, IsTruncRoot))
14073-
return false;
14074-
MaxDepthLevel = std::max(MaxDepthLevel, Level);
14075-
}
14096+
SmallVector<Value *> Ops(PN->incoming_values().begin(),
14097+
PN->incoming_values().end());
14098+
if (!ProcessOperands(Ops, NeedToExit))
14099+
return false;
1407614100
break;
1407714101
}
1407814102

1407914103
// Otherwise, conservatively give up.
1408014104
default:
1408114105
MaxDepthLevel = 1;
14082-
return IsProfitableToDemote && IsPotentiallyTruncated(I, BitWidth);
14106+
return FinalAnalysis();
1408314107
}
14108+
if (NeedToExit)
14109+
return true;
1408414110

1408514111
++MaxDepthLevel;
1408614112
// Gather demoted constant operands.
@@ -14119,15 +14145,17 @@ void BoUpSLP::computeMinimumValueSizes() {
1411914145

1412014146
// The first value node for store/insertelement is sext/zext/trunc? Skip it,
1412114147
// resize to the final type.
14148+
bool IsTruncRoot = false;
1412214149
bool IsProfitableToDemoteRoot = !IsStoreOrInsertElt;
1412314150
if (NodeIdx != 0 &&
1412414151
VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
1412514152
(VectorizableTree[NodeIdx]->getOpcode() == Instruction::ZExt ||
1412614153
VectorizableTree[NodeIdx]->getOpcode() == Instruction::SExt ||
1412714154
VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc)) {
1412814155
assert(IsStoreOrInsertElt && "Expected store/insertelement seeded graph.");
14129-
++NodeIdx;
14156+
IsTruncRoot = VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc;
1413014157
IsProfitableToDemoteRoot = true;
14158+
++NodeIdx;
1413114159
}
1413214160

1413314161
// Analyzed in reduction already and not profitable - exit.
@@ -14259,7 +14287,6 @@ void BoUpSLP::computeMinimumValueSizes() {
1425914287
ReductionBitWidth = bit_ceil(ReductionBitWidth);
1426014288
}
1426114289
bool IsTopRoot = NodeIdx == 0;
14262-
bool IsTruncRoot = false;
1426314290
while (NodeIdx < VectorizableTree.size() &&
1426414291
VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
1426514292
VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {

llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ for.end: ; preds = %for.end.loopexit, %
228228
; YAML-NEXT: Function: test_unrolled_select
229229
; YAML-NEXT: Args:
230230
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
231-
; YAML-NEXT: - Cost: '-36'
231+
; YAML-NEXT: - Cost: '-40'
232232
; YAML-NEXT: - String: ' and with tree size '
233233
; YAML-NEXT: - TreeSize: '10'
234234

@@ -246,15 +246,17 @@ define i32 @test_unrolled_select(ptr noalias nocapture readonly %blk1, ptr noali
246246
; CHECK-NEXT: [[P2_045:%.*]] = phi ptr [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR88:%.*]], [[IF_END_86]] ]
247247
; CHECK-NEXT: [[P1_044:%.*]] = phi ptr [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[IF_END_86]] ]
248248
; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr [[P1_044]], align 1
249-
; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i32>
249+
; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
250250
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr [[P2_045]], align 1
251-
; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i32>
252-
; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <8 x i32> [[TMP1]], [[TMP3]]
253-
; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <8 x i32> [[TMP4]], zeroinitializer
254-
; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <8 x i32> zeroinitializer, [[TMP4]]
255-
; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP5]], <8 x i32> [[TMP6]], <8 x i32> [[TMP4]]
256-
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP7]])
257-
; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP8]], [[S_047]]
251+
; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i16>
252+
; CHECK-NEXT: [[TMP4:%.*]] = sub <8 x i16> [[TMP1]], [[TMP3]]
253+
; CHECK-NEXT: [[TMP5:%.*]] = trunc <8 x i16> [[TMP4]] to <8 x i1>
254+
; CHECK-NEXT: [[TMP6:%.*]] = icmp slt <8 x i1> [[TMP5]], zeroinitializer
255+
; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i16> zeroinitializer, [[TMP4]]
256+
; CHECK-NEXT: [[TMP8:%.*]] = select <8 x i1> [[TMP6]], <8 x i16> [[TMP7]], <8 x i16> [[TMP4]]
257+
; CHECK-NEXT: [[TMP9:%.*]] = zext <8 x i16> [[TMP8]] to <8 x i32>
258+
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP9]])
259+
; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP10]], [[S_047]]
258260
; CHECK-NEXT: [[CMP83:%.*]] = icmp slt i32 [[OP_RDX]], [[LIM:%.*]]
259261
; CHECK-NEXT: br i1 [[CMP83]], label [[IF_END_86]], label [[FOR_END_LOOPEXIT:%.*]]
260262
; CHECK: if.end.86:

0 commit comments

Comments
 (0)