@@ -1267,6 +1267,18 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)))
     return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
 
+  // If both the add and the shift are nuw, then:
+  // ((X << Z) + Y) nuw >>u Z --> X + (Y >>u Z) nuw
+  Value *Y;
+  if (match(Op0, m_OneUse(m_c_NUWAdd((m_NUWShl(m_Value(X), m_Specific(Op1))),
+                                     m_Value(Y))))) {
+    Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
+    auto *newAdd = BinaryOperator::CreateNUWAdd(NewLshr, X);
+    if (auto *Op0Bin = cast<OverflowingBinaryOperator>(Op0))
+      newAdd->setHasNoSignedWrap(Op0Bin->hasNoSignedWrap());
+    return newAdd;
+  }
+
   if (match(Op1, m_APInt(C))) {
     unsigned ShAmtC = C->getZExtValue();
     auto *II = dyn_cast<IntrinsicInst>(Op0);
@@ -1283,7 +1295,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       return new ZExtInst(Cmp, Ty);
     }
 
-    Value *X;
     const APInt *C1;
     if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
       if (C1->ult(ShAmtC)) {
@@ -1328,7 +1339,7 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
     // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
     // TODO: Consolidate with the more general transform that starts from shl
     // (the shifts are in the opposite order).
-    Value *Y;
+
     if (match(Op0,
               m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
                                m_Value(Y))))) {
@@ -1450,9 +1461,24 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
           NewMul->setHasNoSignedWrap(true);
           return NewMul;
         }
+
+        // Special case: lshr (mul nuw (X, 3), 1) -> add nuw nsw (X, lshr(X, 1))
+        if (ShAmtC == 1 && MulC->getZExtValue() == 3) {
+          auto *NewAdd = BinaryOperator::CreateNUWAdd(
+              X,
+              Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+          NewAdd->setHasNoSignedWrap(true);
+          return NewAdd;
+        }
       }
     }
 
+    // lshr (mul nsw (X, 3), 1) -> add nsw (X, lshr(X, 1))
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+        ShAmtC == 1)
+      return BinaryOperator::CreateNSWAdd(
+          X, Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+
     // Try to narrow bswap.
     // In the case where the shift amount equals the bitwidth difference, the
     // shift is eliminated.
@@ -1656,6 +1682,26 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
       if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
         return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
     }
+
+    // Special case: ashr (mul nsw (X, 3), 1) -> add nsw (X, ashr(X, 1))
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+        ShAmt == 1) {
+      Value *Shift;
+      if (auto *Op0Bin = cast<OverflowingBinaryOperator>(Op0)) {
+        if (Op0Bin->hasNoUnsignedWrap())
+          // We can use lshr if the mul is nuw and nsw.
+          Shift =
+              Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+        else
+          Shift =
+              Builder.CreateAShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+
+        auto *NewAdd = BinaryOperator::CreateNSWAdd(X, Shift);
+        NewAdd->setHasNoUnsignedWrap(Op0Bin->hasNoUnsignedWrap());
+
+        return NewAdd;
+      }
+    }
   }
 
   const SimplifyQuery Q = SQ.getWithInstruction(&I);
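For reference, the unsigned identities behind the two new lshr folds can be checked exhaustively with a small standalone program. This is only an illustrative sketch, not part of the patch: it models the i8 case with plain unsigned arithmetic and mirrors the nuw preconditions that the matchers above require.

// Exhaustive i8 check of:
//   ((X << Z) + Y) >>u Z == X + (Y >>u Z)   given the shl and the add are nuw
//   (X * 3) >>u 1         == X + (X >>u 1)  given the mul is nuw
#include <cassert>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    for (unsigned Z = 0; Z < 8; ++Z) {
      if ((X << Z) > 0xFF)
        continue; // shl would not be nuw in i8
      for (unsigned Y = 0; Y < 256; ++Y) {
        if ((X << Z) + Y > 0xFF)
          continue; // add would not be nuw in i8
        unsigned LHS = ((X << Z) + Y) >> Z;
        unsigned RHS = X + (Y >> Z);
        assert(LHS == RHS && RHS <= 0xFF); // values match and the new add is nuw
      }
    }
    if (3 * X <= 0xFF) // mul nuw X, 3 in i8
      assert(((3 * X) >> 1) == X + (X >> 1) && X + (X >> 1) <= 0xFF);
  }
  return 0;
}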
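The ashr fold leans on the same fact on the signed side: an arithmetic shift right by one is a floor division by two, so (3 * X) >> 1 == X + (X >> 1) whenever the multiply is nsw. A similar illustrative standalone check (again not part of the patch; it assumes the C++20 guarantee that >> on a negative value is an arithmetic shift, matching ashr):

// Exhaustive i8 check of:
//   (X * 3) >>s 1 == X + (X >>s 1)   given the mul is nsw
#include <cassert>

int main() {
  for (int X = -128; X < 128; ++X) {
    if (3 * X < -128 || 3 * X > 127)
      continue; // mul would not be nsw in i8
    int Mul = 3 * X;
    assert((Mul >> 1) == X + (X >> 1));                   // floor(3X/2) == X + floor(X/2)
    assert(X + (X >> 1) >= -128 && X + (X >> 1) <= 127);  // the new add is nsw in i8
  }
  return 0;
}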