@@ -1267,6 +1267,19 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)))
     return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
 
+  // Special case:
+  // If both the add and the shl are nuw, we can omit the AND mask entirely:
+  // ((X << C) nuw + Y nuw) >>u C --> X + (Y >>u C)
+  Value *Y;
+  if (match(Op0, m_OneUse(m_c_NUWAdd(m_NUWShl(m_Value(X), m_Specific(Op1)),
+                                     m_Value(Y))))) {
+    Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
+    auto *NewAdd = BinaryOperator::CreateNUWAdd(NewLshr, X);
+    if (auto *Op0Bin = cast<BinaryOperator>(Op0))
+      NewAdd->setHasNoSignedWrap(Op0Bin->hasNoSignedWrap());
+    return NewAdd;
+  }
+
   if (match(Op1, m_APInt(C))) {
     unsigned ShAmtC = C->getZExtValue();
     auto *II = dyn_cast<IntrinsicInst>(Op0);
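A standalone sanity check (not from the patch) of the identity behind the new fold: when both the inner shl and the add are nuw, nothing wraps and the low C bits of (X << C) are zero, so the AND mask used by the general transform further down is unnecessary.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned C = 4;
  for (uint32_t X = 0; X < 2000; ++X)
    for (uint32_t Y = 0; Y < 2000; ++Y) {
      // Skip inputs that would violate the nuw preconditions on shl and add.
      if (X > (UINT32_MAX >> C) || (X << C) > UINT32_MAX - Y)
        continue;
      assert(((X << C) + Y) >> C == X + (Y >> C));
    }
  return 0;
}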
@@ -1283,7 +1296,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       return new ZExtInst(Cmp, Ty);
     }
 
-    Value *X;
     const APInt *C1;
     if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
       if (C1->ult(ShAmtC)) {
@@ -1328,7 +1340,7 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
     // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
     // TODO: Consolidate with the more general transform that starts from shl
     // (the shifts are in the opposite order).
-    Value *Y;
+
     if (match(Op0,
               m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
                                m_Value(Y))))) {
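An aside, not from the patch: the masked form in the comment above holds even when the shl or the add wraps, which is why the general transform keeps the AND while the nuw variant added earlier can drop it. A small check in ordinary wrapping 32-bit arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned C = 5;
  uint32_t X = 0x9e3779b9u, Y = 0x7f4a7c15u; // arbitrary starting values
  for (int i = 0; i < 100000; ++i) {
    assert(((X << C) + Y) >> C == ((X + (Y >> C)) & (UINT32_MAX >> C)));
    // Vary the inputs with simple LCG steps; wrapping is fine here.
    X = X * 1664525u + 1013904223u;
    Y = Y * 22695477u + 1u;
  }
  return 0;
}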
@@ -1450,9 +1462,25 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
           NewMul->setHasNoSignedWrap(true);
           return NewMul;
         }
+
+        // Special case:
+        // lshr (mul nuw X, 3), 1 -> add nuw nsw X, (lshr X, 1)
+        if (ShAmtC == 1 && MulC->getZExtValue() == 3) {
+          auto *NewAdd = BinaryOperator::CreateNUWAdd(
+              X,
+              Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+          NewAdd->setHasNoSignedWrap(true);
+          return NewAdd;
+        }
       }
     }
 
+    // lshr (mul nsw X, 3), 1 -> add nsw X, (lshr X, 1)
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+        ShAmtC == 1)
+      return BinaryOperator::CreateNSWAdd(
+          X, Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+
     // Try to narrow bswap.
     // In the case where the shift amount equals the bitwidth difference, the
     // shift is eliminated.
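Not from the patch: the mul-by-3 special cases rest on 3*X == 2*X + X, so shifting right by one gives X + (X >> 1) exactly when the multiply cannot wrap. A minimal check of the unsigned (nuw) form used above:

#include <cassert>
#include <cstdint>

int main() {
  // Stay within the nuw range of a 32-bit multiply by 3.
  for (uint32_t X = 0; X <= UINT32_MAX / 3; X += 9973)
    assert((X * 3) >> 1 == X + (X >> 1));
  return 0;
}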
@@ -1656,6 +1684,26 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
       if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
         return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
     }
+
+    // Special case: ashr (mul nsw X, 3), 1 -> add nsw X, (ashr X, 1)
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+        ShAmt == 1) {
+      Value *Shift;
+      if (auto *Op0Bin = cast<BinaryOperator>(Op0)) {
+        if (Op0Bin->hasNoUnsignedWrap())
+          // We can use lshr if the mul is both nuw and nsw.
+          Shift =
+              Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+        else
+          Shift =
+              Builder.CreateAShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+
+        auto *NewAdd = BinaryOperator::CreateNSWAdd(X, Shift);
+        NewAdd->setHasNoUnsignedWrap(Op0Bin->hasNoUnsignedWrap());
+
+        return NewAdd;
+      }
+    }
   }
 
   const SimplifyQuery Q = SQ.getWithInstruction(&I);
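Not from the patch: the visitAShr variant leans on the signed version of the same identity, floor(3*x / 2) == x + floor(x / 2), which also holds for negative x. A quick check (signed >> is an arithmetic shift, guaranteed since C++20):

#include <cassert>
#include <cstdint>

int main() {
  // 3 * X stays well inside int32_t for these X, mirroring the nsw
  // requirement on the matched mul.
  for (int32_t X = -50000; X <= 50000; ++X)
    assert((X * 3) >> 1 == X + (X >> 1));
  return 0;
}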