Skip to content

Commit faac57c

Browse files
committed
[RISCV] Expand constant multiplication for targets without M extension
1 parent 8b010e8 commit faac57c

19 files changed

+1702
-971
lines changed

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 80 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
#include "RISCVSelectionDAGInfo.h"
2121
#include "RISCVSubtarget.h"
2222
#include "llvm/ADT/SmallSet.h"
23+
#include "llvm/ADT/SmallVector.h"
2324
#include "llvm/ADT/Statistic.h"
2425
#include "llvm/Analysis/MemoryLocation.h"
2526
#include "llvm/Analysis/ValueTracking.h"
@@ -15436,6 +15437,73 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
1543615437
return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
1543715438
}
1543815439

15440+
// Try to expand a multiply to a sequence of shifts and add/subs,
// for a machine w/o native mul instruction.
//
// The constant multiplier is decomposed into its non-adjacent form (NAF):
// a signed-digit {+1, 0, -1} representation in which no two consecutive
// digits are non-zero, which minimizes the number of add/sub terms. Each
// non-zero digit at bit position I becomes one add/sub of (X << I).
//
// \p MulAmt is the zero-extended constant multiplier. Returns the
// replacement DAG value (a constant 0 when MulAmt == 0).
static SDValue expandMulToBasicOps(SDNode *N, SelectionDAG &DAG,
                                   uint64_t MulAmt) {
  const uint64_t BitWidth = N->getValueType(0).getFixedSizeInBits();
  SDLoc DL(N);

  // X * 0 folds directly to a zero constant.
  if (MulAmt == 0)
    return DAG.getConstant(0, DL, N->getValueType(0));

  // Find the Non-adjacent form of the multiplier.
  llvm::SmallVector<std::pair<bool, uint64_t>> Sequence; // {isAdd, shamt}
  uint64_t E = MulAmt;
  uint64_t I = 0;
  while (E > 0) {
    if (E & 1) {
      // A digit at position >= BitWidth contributes a multiple of
      // 2^BitWidth, which is 0 modulo the type width — stop emitting.
      if (I >= BitWidth)
        break;
      // Digit is +1 when the low two bits are 01, -1 when they are 11;
      // picking -1 for ...11 carries into the higher bits via E -= Z.
      int8_t Z = ((E & 3) == 1) ? 1 : -1;
      Sequence.push_back({(Z == 1), I});
      E -= Z; // unsigned wrap-around is intended when Z == -1 (E += 1)
    }
    E >>= 1;
    I++;
  }

  // Accumulate the terms: Result (+/-)= (X << shamt) for each NAF digit.
  SDValue Result = DAG.getConstant(0, DL, N->getValueType(0));
  SDValue N0 = N->getOperand(0);

  for (const auto &Op : Sequence) {
    SDValue ShiftVal;
    if (Op.second > 0)
      ShiftVal =
          DAG.getNode(ISD::SHL, DL, N->getValueType(0), N0,
                      DAG.getConstant(Op.second, DL, N->getValueType(0)));
    else
      ShiftVal = N0; // shift amount 0: the term is X itself

    ISD::NodeType AddSubOp = Op.first ? ISD::ADD : ISD::SUB;
    Result = DAG.getNode(AddSubOp, DL, N->getValueType(0), Result, ShiftVal);
  }

  return Result;
}
15484+
15485+
// 2^N +/- 2^M -> (add/sub (shl X, C1), (shl X, C2))
15486+
static SDValue expandMulToAddOrSubOfShl(SDNode *N, SelectionDAG &DAG,
15487+
uint64_t MulAmt) {
15488+
uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
15489+
ISD::NodeType Op;
15490+
if (isPowerOf2_64(MulAmt + MulAmtLowBit))
15491+
Op = ISD::SUB;
15492+
else if (isPowerOf2_64(MulAmt - MulAmtLowBit))
15493+
Op = ISD::ADD;
15494+
else
15495+
return SDValue();
15496+
uint64_t ShiftAmt1 = MulAmt + MulAmtLowBit;
15497+
SDLoc DL(N);
15498+
SDValue Shift1 =
15499+
DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(0),
15500+
DAG.getConstant(Log2_64(ShiftAmt1), DL, N->getValueType(0)));
15501+
SDValue Shift2 = DAG.getNode(
15502+
ISD::SHL, DL, N->getValueType(0), N->getOperand(0),
15503+
DAG.getConstant(Log2_64(MulAmtLowBit), DL, N->getValueType(0)));
15504+
return DAG.getNode(Op, DL, N->getValueType(0), Shift1, Shift2);
15505+
}
15506+
1543915507
// Try to expand a scalar multiply to a faster sequence.
1544015508
static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
1544115509
TargetLowering::DAGCombinerInfo &DCI,
@@ -15447,20 +15515,23 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
1544715515
if (DAG.getMachineFunction().getFunction().hasMinSize())
1544815516
return SDValue();
1544915517

15450-
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
15451-
return SDValue();
15452-
1545315518
if (VT != Subtarget.getXLenVT())
1545415519
return SDValue();
1545515520

15456-
const bool HasShlAdd =
15457-
Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
15458-
1545915521
ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
1546015522
if (!CNode)
1546115523
return SDValue();
1546215524
uint64_t MulAmt = CNode->getZExtValue();
1546315525

15526+
if (!Subtarget.hasStdExtM() && !Subtarget.hasStdExtZmmul())
15527+
return expandMulToBasicOps(N, DAG, MulAmt);
15528+
15529+
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
15530+
return SDValue();
15531+
15532+
const bool HasShlAdd =
15533+
Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
15534+
1546415535
// WARNING: The code below is knowingly incorrect with regards to undef semantics.
1546515536
// We're adding additional uses of X here, and in principle, we should be freezing
1546615537
// X before doing so. However, adding freeze here causes real regressions, and no
@@ -15569,22 +15640,7 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
1556915640
return DAG.getNode(ISD::SUB, DL, VT, Shift1, Mul359);
1557015641
}
1557115642
}
15572-
}
15573-
15574-
// 2^N - 2^M -> (sub (shl X, C1), (shl X, C2))
15575-
uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
15576-
if (isPowerOf2_64(MulAmt + MulAmtLowBit)) {
15577-
uint64_t ShiftAmt1 = MulAmt + MulAmtLowBit;
15578-
SDLoc DL(N);
15579-
SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15580-
DAG.getConstant(Log2_64(ShiftAmt1), DL, VT));
15581-
SDValue Shift2 =
15582-
DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15583-
DAG.getConstant(Log2_64(MulAmtLowBit), DL, VT));
15584-
return DAG.getNode(ISD::SUB, DL, VT, Shift1, Shift2);
15585-
}
1558615643

15587-
if (HasShlAdd) {
1558815644
for (uint64_t Divisor : {3, 5, 9}) {
1558915645
if (MulAmt % Divisor != 0)
1559015646
continue;
@@ -15610,6 +15666,9 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
1561015666
}
1561115667
}
1561215668

15669+
if (SDValue V = expandMulToAddOrSubOfShl(N, DAG, MulAmt))
15670+
return V;
15671+
1561315672
return SDValue();
1561415673
}
1561515674

llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll

Lines changed: 42 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -262,20 +262,33 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
262262
; RV64I-NEXT: sext.w a1, a0
263263
; RV64I-NEXT: beqz a1, .LBB2_2
264264
; RV64I-NEXT: # %bb.1: # %cond.false
265-
; RV64I-NEXT: addi sp, sp, -16
266-
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
267-
; RV64I-NEXT: neg a1, a0
265+
; RV64I-NEXT: negw a1, a0
268266
; RV64I-NEXT: and a0, a0, a1
269-
; RV64I-NEXT: lui a1, 30667
270-
; RV64I-NEXT: addiw a1, a1, 1329
271-
; RV64I-NEXT: call __muldi3
267+
; RV64I-NEXT: slli a1, a0, 6
268+
; RV64I-NEXT: slli a2, a0, 8
269+
; RV64I-NEXT: slli a3, a0, 10
270+
; RV64I-NEXT: slli a4, a0, 12
271+
; RV64I-NEXT: add a1, a1, a2
272+
; RV64I-NEXT: slli a2, a0, 16
273+
; RV64I-NEXT: subw a3, a3, a4
274+
; RV64I-NEXT: slli a4, a0, 18
275+
; RV64I-NEXT: subw a2, a2, a4
276+
; RV64I-NEXT: slli a4, a0, 4
277+
; RV64I-NEXT: subw a4, a0, a4
278+
; RV64I-NEXT: add a1, a4, a1
279+
; RV64I-NEXT: slli a4, a0, 14
280+
; RV64I-NEXT: subw a3, a3, a4
281+
; RV64I-NEXT: slli a4, a0, 23
282+
; RV64I-NEXT: subw a2, a2, a4
283+
; RV64I-NEXT: slli a0, a0, 27
284+
; RV64I-NEXT: add a1, a1, a3
285+
; RV64I-NEXT: add a0, a2, a0
286+
; RV64I-NEXT: add a0, a1, a0
272287
; RV64I-NEXT: srliw a0, a0, 27
273288
; RV64I-NEXT: lui a1, %hi(.LCPI2_0)
274289
; RV64I-NEXT: addi a1, a1, %lo(.LCPI2_0)
275290
; RV64I-NEXT: add a0, a1, a0
276291
; RV64I-NEXT: lbu a0, 0(a0)
277-
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
278-
; RV64I-NEXT: addi sp, sp, 16
279292
; RV64I-NEXT: ret
280293
; RV64I-NEXT: .LBB2_2:
281294
; RV64I-NEXT: li a0, 32
@@ -718,20 +731,33 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
718731
;
719732
; RV64I-LABEL: test_cttz_i32_zero_undef:
720733
; RV64I: # %bb.0:
721-
; RV64I-NEXT: addi sp, sp, -16
722-
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
723-
; RV64I-NEXT: neg a1, a0
734+
; RV64I-NEXT: negw a1, a0
724735
; RV64I-NEXT: and a0, a0, a1
725-
; RV64I-NEXT: lui a1, 30667
726-
; RV64I-NEXT: addiw a1, a1, 1329
727-
; RV64I-NEXT: call __muldi3
736+
; RV64I-NEXT: slli a1, a0, 6
737+
; RV64I-NEXT: slli a2, a0, 8
738+
; RV64I-NEXT: slli a3, a0, 10
739+
; RV64I-NEXT: slli a4, a0, 12
740+
; RV64I-NEXT: add a1, a1, a2
741+
; RV64I-NEXT: slli a2, a0, 16
742+
; RV64I-NEXT: subw a3, a3, a4
743+
; RV64I-NEXT: slli a4, a0, 18
744+
; RV64I-NEXT: subw a2, a2, a4
745+
; RV64I-NEXT: slli a4, a0, 4
746+
; RV64I-NEXT: subw a4, a0, a4
747+
; RV64I-NEXT: add a1, a4, a1
748+
; RV64I-NEXT: slli a4, a0, 14
749+
; RV64I-NEXT: subw a3, a3, a4
750+
; RV64I-NEXT: slli a4, a0, 23
751+
; RV64I-NEXT: subw a2, a2, a4
752+
; RV64I-NEXT: slli a0, a0, 27
753+
; RV64I-NEXT: add a1, a1, a3
754+
; RV64I-NEXT: add a0, a2, a0
755+
; RV64I-NEXT: add a0, a1, a0
728756
; RV64I-NEXT: srliw a0, a0, 27
729757
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
730758
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
731759
; RV64I-NEXT: add a0, a1, a0
732760
; RV64I-NEXT: lbu a0, 0(a0)
733-
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
734-
; RV64I-NEXT: addi sp, sp, 16
735761
; RV64I-NEXT: ret
736762
;
737763
; RV32M-LABEL: test_cttz_i32_zero_undef:

0 commit comments

Comments
 (0)