
[AMDGPU] Rematerialize scalar loads #68778

Merged · 6 commits · Oct 26, 2023
106 changes: 105 additions & 1 deletion llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -106,9 +106,27 @@ static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

static bool canRemat(const MachineInstr &MI) {

if (SIInstrInfo::isVOP1(MI) || SIInstrInfo::isVOP2(MI) ||
SIInstrInfo::isVOP3(MI) || SIInstrInfo::isSDWA(MI) ||
SIInstrInfo::isSALU(MI))
return true;

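// SMRD (scalar memory) loads are only safe to rematerialize when they read
// invariant memory, so the reloaded value is guaranteed to match the original.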
if (SIInstrInfo::isSMRD(MI)) {
return !MI.memoperands_empty() &&
llvm::all_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
return MMO->isLoad() && MMO->isInvariant();
});
}

return false;
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(
const MachineInstr &MI) const {
- if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI) || isSALU(MI)) {
+ if (canRemat(MI)) {
// Normally a VALU's use of exec would block rematerialization, but an
// implicit exec read is OK here because every VALU instruction has one.
// We want all of the generic rematerialization logic except that check.
@@ -2434,6 +2452,92 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
return true;
}

void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, Register DestReg,
unsigned SubIdx, const MachineInstr &Orig,
const TargetRegisterInfo &RI) const {

// Try shrinking the instruction to remat only the part needed for the
// current context.
// TODO: Handle more cases.
unsigned Opcode = Orig.getOpcode();
switch (Opcode) {
case AMDGPU::S_LOAD_DWORDX16_IMM:
case AMDGPU::S_LOAD_DWORDX8_IMM: {
if (SubIdx != 0)
break;

if (I == MBB.end())
break;

if (I->isBundled())
break;

// Look for a single use of the register that is also a subreg.
Register RegToFind = Orig.getOperand(0).getReg();
MachineOperand *UseMO = nullptr;
for (auto &CandMO : I->operands()) {
if (!CandMO.isReg() || CandMO.getReg() != RegToFind || CandMO.isDef())
continue;
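// If this is the second use found, give up: only a single use can be
// rewritten to the narrowed remat destination.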
if (UseMO) {
UseMO = nullptr;
break;
}
UseMO = &CandMO;
}
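// Bail if there was no single use, or if the use reads the full register;
// shrinking is only possible when a strict subregister is read.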
if (!UseMO || UseMO->getSubReg() == AMDGPU::NoSubRegister)
break;

unsigned Offset = RI.getSubRegIdxOffset(UseMO->getSubReg());
unsigned SubregSize = RI.getSubRegIdxSize(UseMO->getSubReg());

MachineFunction *MF = MBB.getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
assert(MRI.use_nodbg_empty(DestReg) && "DestReg should have no users yet.");

unsigned NewOpcode = -1;
if (SubregSize == 256)
NewOpcode = AMDGPU::S_LOAD_DWORDX8_IMM;
else if (SubregSize == 128)
NewOpcode = AMDGPU::S_LOAD_DWORDX4_IMM;
else
break;

const MCInstrDesc &TID = get(NewOpcode);
const TargetRegisterClass *NewRC =
RI.getAllocatableClass(getRegClass(TID, 0, &RI, *MF));
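// Narrowing DestReg's register class is safe: the assert above guarantees
// the remat destination has no (non-debug) users yet.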
MRI.setRegClass(DestReg, NewRC);

UseMO->setReg(DestReg);
UseMO->setSubReg(AMDGPU::NoSubRegister);

// Use a smaller load with the desired size, possibly with an updated offset.
MachineInstr *MI = MF->CloneMachineInstr(&Orig);
MI->setDesc(TID);
MI->getOperand(0).setReg(DestReg);
MI->getOperand(0).setSubReg(AMDGPU::NoSubRegister);
if (Offset) {
MachineOperand *OffsetMO = getNamedOperand(*MI, AMDGPU::OpName::offset);
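// getSubRegIdxOffset returned bits; SMEM immediate offsets are in bytes.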
int64_t FinalOffset = OffsetMO->getImm() + Offset / 8;
OffsetMO->setImm(FinalOffset);
}
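// Rebuild the memory operands to reflect the narrowed access size in bytes.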
SmallVector<MachineMemOperand *> NewMMOs;
for (const MachineMemOperand *MemOp : Orig.memoperands())
NewMMOs.push_back(MF->getMachineMemOperand(MemOp, MemOp->getPointerInfo(),
SubregSize / 8));
MI->setMemRefs(*MF, NewMMOs);

MBB.insert(I, MI);
return;
}

default:
break;
}

TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, RI);
}

std::pair<MachineInstr*, MachineInstr*>
SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
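To make the shrinking arithmetic above concrete, here is a minimal standalone sketch, runnable on its own. The opcode names mirror the AMDGPU S_LOAD_* instructions; the struct and function names are hypothetical and only model the size/offset computation in reMaterialize, not the MachineInstr rewriting.

#include <cstdint>
#include <optional>

// Hypothetical stand-ins for the two narrower opcodes the hook can shrink to.
enum class NarrowLoad { SLoadDWordX8, SLoadDWordX4 };

struct ShrunkLoad {
  NarrowLoad Opcode;  // Narrower scalar load to rematerialize.
  int64_t ByteOffset; // Immediate offset of the new load, in bytes.
};

// Mirrors the selection in SIInstrInfo::reMaterialize: the used subregister's
// size (in bits) picks the opcode, and its bit offset, divided by 8, is added
// to the original immediate byte offset. Unsupported sizes return nullopt,
// matching the fall-through to the generic TargetInstrInfo implementation.
std::optional<ShrunkLoad> shrinkScalarLoad(unsigned SubregSizeBits,
                                           unsigned SubregOffsetBits,
                                           int64_t OrigByteOffset) {
  NarrowLoad Opc;
  if (SubregSizeBits == 256)
    Opc = NarrowLoad::SLoadDWordX8;
  else if (SubregSizeBits == 128)
    Opc = NarrowLoad::SLoadDWordX4;
  else
    return std::nullopt; // e.g. 64-bit subregs are not handled (see TODO).
  return ShrunkLoad{Opc, OrigByteOffset + SubregOffsetBits / 8};
}

For example, rematerializing only the upper 256-bit half of an S_LOAD_DWORDX16_IMM with immediate offset 0 (bit offset 256, size 256) yields a DWORDX8-equivalent load at byte offset 32.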
5 changes: 5 additions & 0 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -275,6 +275,11 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {

bool expandPostRAPseudo(MachineInstr &MI) const override;

void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const override;

// Splits a V_MOV_B64_DPP_PSEUDO opcode into a pair of v_mov_b32_dpp
// instructions. Returns a pair of generated instructions.
// Can split either post-RA with physical registers or pre-RA with
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/hsa-metadata-kernel-code-props-v3.ll
@@ -47,8 +47,8 @@ entry:
}

; CHECK: .name: num_spilled_sgprs
-; GFX700: .sgpr_spill_count: 38
-; GFX803: .sgpr_spill_count: 22
+; GFX700: .sgpr_spill_count: 12
+; GFX803: .sgpr_spill_count: 12
; GFX900: .sgpr_spill_count: 48
; GFX1010: .sgpr_spill_count: 48
; CHECK: .symbol: num_spilled_sgprs.kd