Skip to content

[RISCV] Add MachineCombiner to fold (sh3add Z, (add X, (slli Y, 6))) -> (sh3add (sh3add Y, Z), X). #87884

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions llvm/include/llvm/CodeGen/MachineCombinerPattern.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,8 @@ enum class MachineCombinerPattern {
FMADD_XA,
FMSUB,
FNMSUB,
SHXADD_ADD_SLLI_OP1,
SHXADD_ADD_SLLI_OP2,

// X86 VNNI
DPWSSD,
Expand Down
151 changes: 151 additions & 0 deletions llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1829,13 +1829,96 @@ static bool getFPPatterns(MachineInstr &Root,
return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

/// Utility routine that checks if \param MO is defined by an
/// \param CombineOpc instruction in the basic block \param MBB
static const MachineInstr *canCombine(const MachineBasicBlock &MBB,
                                      const MachineOperand &MO,
                                      unsigned CombineOpc) {
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Only virtual registers have a unique, traceable definition.
  if (!MO.isReg() || !MO.getReg().isVirtual())
    return nullptr;

  const MachineInstr *DefMI = MRI.getUniqueVRegDef(MO.getReg());
  if (!DefMI || DefMI->getOpcode() != CombineOpc)
    return nullptr;
  // The def must be in the same block so it is part of the trace (otherwise
  // it won't have a depth).
  if (DefMI->getParent() != &MBB)
    return nullptr;
  // The def must only be used by the instruction we combine with; otherwise
  // the original value is still live and nothing is saved.
  if (!MRI.hasOneNonDBGUse(DefMI->getOperand(0).getReg()))
    return nullptr;

  return DefMI;
}

/// Utility routine that checks if \param MO is defined by a SLLI in \param
/// MBB that can be combined by splitting across 2 SHXADD instructions. The
/// first SHXADD shift amount is given by \param OuterShiftAmt.
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
const MachineOperand &MO,
unsigned OuterShiftAmt) {
const MachineInstr *ShiftMI = canCombine(MBB, MO, RISCV::SLLI);
if (!ShiftMI)
return false;

unsigned InnerShiftAmt = ShiftMI->getOperand(2).getImm();
if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
return false;

return true;
}

// Returns the shift amount from a SHXADD instruction. Returns 0 if the
// instruction is not a SHXADD.
static unsigned getSHXADDShiftAmount(unsigned Opc) {
  if (Opc == RISCV::SH1ADD)
    return 1;
  if (Opc == RISCV::SH2ADD)
    return 2;
  if (Opc == RISCV::SH3ADD)
    return 3;
  return 0;
}

// Look for opportunities to combine (sh3add Z, (add X, (slli Y, 5))) into
// (sh3add (sh2add Y, Z), X).
static bool
getSHXADDPatterns(const MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) {
unsigned ShiftAmt = getSHXADDShiftAmount(Root.getOpcode());
if (!ShiftAmt)
return false;

const MachineBasicBlock &MBB = *Root.getParent();

const MachineInstr *AddMI = canCombine(MBB, Root.getOperand(2), RISCV::ADD);
if (!AddMI)
return false;

bool Found = false;
if (canCombineShiftIntoShXAdd(MBB, AddMI->getOperand(1), ShiftAmt)) {
Patterns.push_back(MachineCombinerPattern::SHXADD_ADD_SLLI_OP1);
Found = true;
}
if (canCombineShiftIntoShXAdd(MBB, AddMI->getOperand(2), ShiftAmt)) {
Patterns.push_back(MachineCombinerPattern::SHXADD_ADD_SLLI_OP2);
Found = true;
}

return Found;
}

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  // Try the RISC-V specific patterns first; fall back to the
  // target-independent ones if none matched.
  if (getFPPatterns(Root, Patterns, DoRegPressureReduce) ||
      getSHXADDPatterns(Root, Patterns))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
                                                     DoRegPressureReduce);
}
Expand Down Expand Up @@ -1918,6 +2001,68 @@ static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
DelInstrs.push_back(&Root);
}

// Combine patterns like (sh3add Z, (add X, (slli Y, 5))) to
// (sh3add (sh2add Y, Z), X) if the shift amount can be split across two
// shXadd instructions. The outer shXadd keeps its original opcode.
// \p AddOpIdx selects which ADD operand (1 or 2) holds the SLLI.
static void
genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
                  SmallVectorImpl<MachineInstr *> &InsInstrs,
                  SmallVectorImpl<MachineInstr *> &DelInstrs,
                  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  unsigned OuterShiftAmt = getSHXADDShiftAmount(Root.getOpcode());
  assert(OuterShiftAmt != 0 && "Unexpected opcode");

  MachineInstr *AddMI = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
  MachineInstr *ShiftMI =
      MRI.getUniqueVRegDef(AddMI->getOperand(AddOpIdx).getReg());

  unsigned InnerShiftAmt = ShiftMI->getOperand(2).getImm();
  // canCombineShiftIntoShXAdd accepts InnerShiftAmt == OuterShiftAmt (handled
  // by the plain ADD case below), so the assertion must be >=, not >.
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  // The inner instruction supplies the residual shift of Y.
  unsigned InnerOpc;
  switch (InnerShiftAmt - OuterShiftAmt) {
  default:
    llvm_unreachable("Unexpected shift amount");
  case 0:
    InnerOpc = RISCV::ADD;
    break;
  case 1:
    InnerOpc = RISCV::SH1ADD;
    break;
  case 2:
    InnerOpc = RISCV::SH2ADD;
    break;
  case 3:
    InnerOpc = RISCV::SH3ADD;
    break;
  }

  // X is the non-SLLI operand of the ADD, Y the SLLI source, Z the other
  // operand of the root shXadd.
  const MachineOperand &X = AddMI->getOperand(3 - AddOpIdx);
  const MachineOperand &Y = ShiftMI->getOperand(1);
  const MachineOperand &Z = Root.getOperand(1);

  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  // NewVR = (shXadd Y, Z); result = (root-opcode NewVR, X).
  auto MIB1 = BuildMI(*MF, MIMetadata(Root), TII->get(InnerOpc), NewVR)
                  .addReg(Y.getReg(), getKillRegState(Y.isKill()))
                  .addReg(Z.getReg(), getKillRegState(Z.isKill()));
  auto MIB2 = BuildMI(*MF, MIMetadata(Root), TII->get(Root.getOpcode()),
                      Root.getOperand(0).getReg())
                  .addReg(NewVR, RegState::Kill)
                  .addReg(X.getReg(), getKillRegState(X.isKill()));

  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(ShiftMI);
  DelInstrs.push_back(AddMI);
  DelInstrs.push_back(&Root);
}

void RISCVInstrInfo::genAlternativeCodeSequence(
MachineInstr &Root, MachineCombinerPattern Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
Expand All @@ -1941,6 +2086,12 @@ void RISCVInstrInfo::genAlternativeCodeSequence(
combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
return;
}
case MachineCombinerPattern::SHXADD_ADD_SLLI_OP1:
genShXAddAddShift(Root, 1, InsInstrs, DelInstrs, InstrIdxForVirtReg);
return;
case MachineCombinerPattern::SHXADD_ADD_SLLI_OP2:
genShXAddAddShift(Root, 2, InsInstrs, DelInstrs, InstrIdxForVirtReg);
return;
}
}

Expand Down
40 changes: 16 additions & 24 deletions llvm/test/CodeGen/RISCV/rv64zba.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1404,9 +1404,8 @@ define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
;
; RV64ZBA-LABEL: sh6_sh3_add2:
; RV64ZBA: # %bb.0: # %entry
; RV64ZBA-NEXT: slli a1, a1, 6
; RV64ZBA-NEXT: add a0, a1, a0
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ret
entry:
%shl = shl i64 %z, 3
Expand Down Expand Up @@ -2111,9 +2110,8 @@ define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
;
; RV64ZBA-LABEL: array_index_sh1_sh3:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli a1, a1, 4
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: sh1add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
%a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
Expand Down Expand Up @@ -2174,9 +2172,8 @@ define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
;
; RV64ZBA-LABEL: array_index_sh2_sh2:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli a1, a1, 4
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh2add a0, a2, a0
; RV64ZBA-NEXT: sh2add a1, a1, a2
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
%a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
Expand All @@ -2196,9 +2193,8 @@ define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
;
; RV64ZBA-LABEL: array_index_sh2_sh3:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli a1, a1, 5
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: sh2add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
%a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
Expand Down Expand Up @@ -2238,9 +2234,8 @@ define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
;
; RV64ZBA-LABEL: array_index_sh3_sh1:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli a1, a1, 4
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh1add a0, a2, a0
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh1add a0, a1, a0
; RV64ZBA-NEXT: lh a0, 0(a0)
; RV64ZBA-NEXT: ret
%a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
Expand All @@ -2260,9 +2255,8 @@ define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
;
; RV64ZBA-LABEL: array_index_sh3_sh2:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli a1, a1, 5
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh2add a0, a2, a0
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh2add a0, a1, a0
; RV64ZBA-NEXT: lw a0, 0(a0)
; RV64ZBA-NEXT: ret
%a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
Expand All @@ -2282,9 +2276,8 @@ define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
;
; RV64ZBA-LABEL: array_index_sh3_sh3:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: slli a1, a1, 6
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
%a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
Expand All @@ -2308,9 +2301,8 @@ define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64ZBA-LABEL: array_index_lshr_sh3_sh3:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: srli a1, a1, 58
; RV64ZBA-NEXT: slli a1, a1, 6
; RV64ZBA-NEXT: add a0, a0, a1
; RV64ZBA-NEXT: sh3add a0, a2, a0
; RV64ZBA-NEXT: sh3add a1, a1, a2
; RV64ZBA-NEXT: sh3add a0, a1, a0
; RV64ZBA-NEXT: ld a0, 0(a0)
; RV64ZBA-NEXT: ret
%shr = lshr i64 %idx1, 58
Expand Down