
Commit 1916044

[InstCombine] lshr (mul (X, 2^N + 1)), N -> add (X, lshr(X, N))
A generalization of the proposed x * 3/2 -> x + (x >> 1) transformation. Proof: https://alive2.llvm.org/ce/z/U7DWp4
1 parent 99480a1
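As a quick standalone sanity check of the identity behind the fold (the Alive2 link above is the actual proof), the following minimal C++ sketch, which is not part of the commit, exhaustively verifies the N = 1 case at 8 bits: whenever x * 3 does not wrap (the nuw precondition), (x * 3) >> 1 equals x + (x >> 1).

// Minimal sketch, not part of this commit: exhaustively check the N = 1 case
// of the fold at 8 bits. When x * 3 does not overflow (the nuw precondition),
// lshr (mul nuw x, 3), 1 and add x, (lshr x, 1) must agree.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned N = 1;
  const unsigned MulC = (1u << N) + 1; // 2^N + 1, i.e. 3
  for (unsigned X = 0; X <= UINT8_MAX; ++X) {
    unsigned Product = X * MulC;
    if (Product > UINT8_MAX)
      continue; // would violate the nuw requirement at 8 bits
    unsigned Lhs = Product >> N;  // lshr (mul nuw X, 3), 1
    unsigned Rhs = X + (X >> N);  // add X, (lshr X, 1)
    assert(Lhs == Rhs);
  }
  return 0;
}

The same reasoning extends to any N, since x * (2^N + 1) = (x << N) + x and the low N bits discarded by the shift come only from x.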

2 files changed: +21 −11 lines changed


llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp

Lines changed: 17 additions & 7 deletions
@@ -1411,13 +1411,23 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
 
     const APInt *MulC;
     if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
-      // Look for a "splat" mul pattern - it replicates bits across each half of
-      // a value, so a right shift is just a mask of the low bits:
-      // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
-      // TODO: Generalize to allow more than just half-width shifts?
-      if (BitWidth > 2 && ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
-          MulC->logBase2() == ShAmtC)
-        return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
+      if ((*MulC - 1).isPowerOf2() && MulC->logBase2() == ShAmtC) {
+        // Look for a "splat" mul pattern - it replicates bits across each half
+        // of a value, so a right shift is just a mask of the low bits:
+        // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
+        if (BitWidth > 2 && ShAmtC * 2 == BitWidth)
+          return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
+
+        // lshr (mul (X, 2^N + 1)), N -> add (X, lshr(X, N))
+        if (Op0->hasOneUse()) {
+          auto *NewAdd = BinaryOperator::CreateNUWAdd(
+              X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
+                                    I.isExact()));
+          NewAdd->setHasNoSignedWrap(
+              cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
+          return NewAdd;
+        }
+      }
 
       // The one-use check is not strictly necessary, but codegen may not be
       // able to invert the transform and perf may suffer with an extra mul

llvm/test/Transforms/InstCombine/ashr-lshr.ll

Lines changed: 4 additions & 4 deletions
@@ -653,8 +653,8 @@ define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
 
 define i32 @lshr_mul_times_3_div_2(i32 %0) {
 ; CHECK-LABEL: @lshr_mul_times_3_div_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nsw nuw i32 %0, 3
@@ -688,8 +688,8 @@ define i32 @mul_times_3_div_2_multiuse_lshr(i32 %x) {
 
 define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
 ; CHECK-LABEL: @lshr_mul_times_3_div_2_exact_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nuw i32 %x, 3
