Skip to content

Commit 8bfdbb9

Browse files
authored
[InstCombine] Remove redundant shift folds (NFCI) (#90016)
These are already handled by canEvaluateShifted/getShiftedValue (one-use only), and also in reassociateShiftAmtsOfTwoSameDirectionShifts (also multi-use), so let's at least get rid of the *third* implementation...
1 parent 565bdb5 commit 8bfdbb9

File tree

1 file changed

+0
-16
lines changed

1 file changed

+0
-16
lines changed

llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1120,14 +1120,6 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
1120 1120       return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, Mask));
1121 1121     }
1122 1122
1123      -  if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
1124      -    unsigned AmtSum = ShAmtC + C1->getZExtValue();
1125      -    // Oversized shifts are simplified to zero in InstSimplify.
1126      -    if (AmtSum < BitWidth)
1127      -      // (X << C1) << C2 --> X << (C1 + C2)
1128      -      return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
1129      -  }
1130      -
1131 1123   // If we have an opposite shift by the same amount, we may be able to
1132 1124   // reorder binops and shifts to eliminate math/logic.
1133 1125   auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
@@ -1394,14 +1386,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
1394 1386       }
1395 1387     }
1396 1388
1397      -  // (X >>u C1) >>u C --> X >>u (C1 + C)
1398      -  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1)))) {
1399      -    // Oversized shifts are simplified to zero in InstSimplify.
1400      -    unsigned AmtSum = ShAmtC + C1->getZExtValue();
1401      -    if (AmtSum < BitWidth)
1402      -      return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
1403      -  }
1404      -
1405 1389   Instruction *TruncSrc;
1406 1390   if (match(Op0, m_OneUse(m_Trunc(m_Instruction(TruncSrc)))) &&
1407 1391       match(TruncSrc, m_LShr(m_Value(X), m_APInt(C1)))) {

0 commit comments

Comments (0)