AMDGPU: Implement getConstValDefinedInReg and use in foldImmediate (NFC) #127482

Merged: 1 commit, Feb 18, 2025

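For context: the patch overrides the TargetInstrInfo::getConstValDefinedInReg hook for AMDGPU. Given a MachineInstr MI and a Register Reg, the override returns true and sets ImmVal when MI is one of the listed move-immediate opcodes writing a known integer constant into Reg; foldImmediate then replaces its hand-rolled opcode switch and src0 check with a call to this hook (see the hunk at -3395 below). The following is a minimal sketch of how a caller elsewhere might use the hook; the getKnownConstant helper and its surrounding setup are illustrative assumptions, not part of this patch.

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <optional>

using namespace llvm;

// Hypothetical helper (not in this patch): if Reg's unique SSA definition
// materializes a known integer constant, return that value.
static std::optional<int64_t> getKnownConstant(Register Reg,
                                               const MachineRegisterInfo &MRI,
                                               const TargetInstrInfo &TII) {
  if (!Reg.isVirtual())
    return std::nullopt;

  // In SSA form each virtual register has a single defining instruction.
  const MachineInstr *Def = MRI.getVRegDef(Reg);
  if (!Def)
    return std::nullopt;

  // The target hook recognizes its own move-immediate opcodes (for AMDGPU:
  // S_MOV_B32, V_MOV_B32_e32, S_MOVK_I32, the 64-bit pseudos, ...) and
  // reports the materialized value.
  int64_t ImmVal;
  if (TII.getConstValDefinedInReg(*Def, Reg, ImmVal))
    return ImmVal;
  return std::nullopt;
}
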
67 changes: 36 additions & 31 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1327,6 +1327,33 @@ Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
   return Reg;
 }
 
+bool SIInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
+                                          const Register Reg,
+                                          int64_t &ImmVal) const {
+  // TODO: Handle all the special cases handled in SIShrinkInstructions
+  // (e.g. s_brev_b32 imm -> reverse(imm))
+  switch (MI.getOpcode()) {
+  case AMDGPU::V_MOV_B32_e32:
+  case AMDGPU::S_MOV_B32:
+  case AMDGPU::S_MOVK_I32:
+  case AMDGPU::S_MOV_B64:
+  case AMDGPU::V_MOV_B64_e32:
+  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
+  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
+  case AMDGPU::V_MOV_B64_PSEUDO: {
+    const MachineOperand &Src0 = MI.getOperand(1);
+    if (Src0.isImm()) {
+      ImmVal = Src0.getImm();
+      return MI.getOperand(0).getReg() == Reg;
+    }
+
+    return false;
+  }
+  default:
+    return false;
+  }
+}
+
 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
 
   if (RI.isAGPRClass(DstRC))
@@ -3395,27 +3422,11 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
   if (!MRI->hasOneNonDBGUse(Reg))
     return false;
 
-  switch (DefMI.getOpcode()) {
-  default:
-    return false;
-  case AMDGPU::V_MOV_B64_e32:
-  case AMDGPU::S_MOV_B64:
-  case AMDGPU::V_MOV_B64_PSEUDO:
-  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
-  case AMDGPU::V_MOV_B32_e32:
-  case AMDGPU::S_MOV_B32:
-  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
-    break;
-  }
-
-  const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
-  assert(ImmOp);
-  // FIXME: We could handle FrameIndex values here.
-  if (!ImmOp->isImm())
+  int64_t Imm;
+  if (!getConstValDefinedInReg(DefMI, Reg, Imm))
     return false;
 
-  auto getImmFor = [ImmOp](const MachineOperand &UseOp) -> int64_t {
-    int64_t Imm = ImmOp->getImm();
+  auto getImmFor = [=](const MachineOperand &UseOp) -> int64_t {
     switch (UseOp.getSubReg()) {
     default:
       return Imm;
@@ -3502,12 +3513,14 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
     // If this is a free constant, there's no reason to do this.
     // TODO: We could fold this here instead of letting SIFoldOperands do it
     // later.
-    MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
+    int Src0Idx = getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::src0);
 
     // Any src operand can be used for the legality check.
-    if (isInlineConstant(UseMI, *Src0, *ImmOp))
+    if (isInlineConstant(UseMI, Src0Idx, Imm))
       return false;
 
+    MachineOperand *Src0 = &UseMI.getOperand(Src0Idx);
+
     bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
                  Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64;
     bool IsFMA =
@@ -4267,18 +4280,11 @@ bool SIInstrInfo::isInlineConstant(const APFloat &Imm) const {
   }
 }
 
-bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
-                                   uint8_t OperandType) const {
-  assert(!MO.isReg() && "isInlineConstant called on register operand!");
-  if (!MO.isImm())
-    return false;
-
+bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
   // MachineOperand provides no way to tell the true operand size, since it only
   // records a 64-bit value. We need to know the size to determine if a 32-bit
   // floating point immediate bit pattern is legal for an integer immediate. It
   // would be for any 32-bit integer operand, but would not be for a 64-bit one.
-
-  int64_t Imm = MO.getImm();
   switch (OperandType) {
   case AMDGPU::OPERAND_REG_IMM_INT32:
   case AMDGPU::OPERAND_REG_IMM_FP32:
@@ -4300,8 +4306,7 @@ bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
   case AMDGPU::OPERAND_REG_INLINE_C_INT64:
   case AMDGPU::OPERAND_REG_INLINE_C_FP64:
   case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
-    return AMDGPU::isInlinableLiteral64(MO.getImm(),
-                                        ST.hasInv2PiInlineImm());
+    return AMDGPU::isInlinableLiteral64(Imm, ST.hasInv2PiInlineImm());
   case AMDGPU::OPERAND_REG_IMM_INT16:
   case AMDGPU::OPERAND_REG_INLINE_C_INT16:
   case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
22 changes: 18 additions & 4 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -278,6 +278,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
                     MachineBasicBlock::iterator I, const DebugLoc &DL,
                     Register SrcReg, int Value) const;
 
+  bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
+                               int64_t &ImmVal) const override;
+
   void storeRegToStackSlot(
       MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
       bool isKill, int FrameIndex, const TargetRegisterClass *RC,
@@ -1063,7 +1066,13 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   // Some operands like FrameIndexes could resolve to an inline immediate value
   // that will not require an additional 4-bytes; this function assumes that it
   // will.
-  bool isInlineConstant(const MachineOperand &MO, uint8_t OperandType) const;
+  bool isInlineConstant(const MachineOperand &MO, uint8_t OperandType) const {
+    assert(!MO.isReg() && "isInlineConstant called on register operand!");
+    if (!MO.isImm())
+      return false;
+    return isInlineConstant(MO.getImm(), OperandType);
+  }
+  bool isInlineConstant(int64_t ImmVal, uint8_t OperandType) const;
 
   bool isInlineConstant(const MachineOperand &MO,
                         const MCOperandInfo &OpInfo) const {
@@ -1091,7 +1100,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   }
 
   bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
-                        const MachineOperand &MO) const {
+                        int64_t ImmVal) const {
     if (OpIdx >= MI.getDesc().NumOperands)
      return false;
 
@@ -1101,10 +1110,15 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
 
       uint8_t OpType = (Size == 8) ?
         AMDGPU::OPERAND_REG_IMM_INT64 : AMDGPU::OPERAND_REG_IMM_INT32;
-      return isInlineConstant(MO, OpType);
+      return isInlineConstant(ImmVal, OpType);
     }
 
-    return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType);
+    return isInlineConstant(ImmVal, MI.getDesc().operands()[OpIdx].OperandType);
   }
 
+  bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
+                        const MachineOperand &MO) const {
+    return isInlineConstant(MI, OpIdx, MO.getImm());
+  }
+
   bool isInlineConstant(const MachineOperand &MO) const {