
[X86] Use Register in X86SpeculativeLoadHardening.cpp. NFC #130905

Merged (1 commit, Mar 12, 2025)

70 changes: 35 additions & 35 deletions llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -146,8 +146,8 @@ class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {

/// Manages the predicate state traced through the program.
struct PredState {
unsigned InitialReg = 0;
unsigned PoisonReg = 0;
Register InitialReg;
Register PoisonReg;

const TargetRegisterClass *RC;
MachineSSAUpdater SSA;
@@ -177,7 +177,7 @@ class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {

void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);

unsigned saveEFLAGS(MachineBasicBlock &MBB,
Register saveEFLAGS(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc);
void restoreEFLAGS(MachineBasicBlock &MBB,
@@ -186,28 +186,28 @@ class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {

void mergePredStateIntoSP(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc, unsigned PredStateReg);
unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
const DebugLoc &Loc, Register PredStateReg);
Register extractPredStateFromSP(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc);

void
hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
MachineOperand &IndexMO,
SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
MachineInstr *
sinkPostLoadHardenedInst(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
bool canHardenRegister(Register Reg);
unsigned hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
Register hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc);
unsigned hardenPostLoad(MachineInstr &MI);
Register hardenPostLoad(MachineInstr &MI);
void hardenReturnInstr(MachineInstr &MI);
void tracePredStateThroughCall(MachineInstr &MI);
void hardenIndirectCallOrJumpInstr(
MachineInstr &MI,
SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
};

} // end anonymous namespace
@@ -743,7 +743,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(

// We will wire each cmov to each other, but need to start with the
// incoming pred state.
unsigned CurStateReg = PS->InitialReg;
Register CurStateReg = PS->InitialReg;

for (X86::CondCode Cond : Conds) {
int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
@@ -986,7 +986,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
// No terminator or non-branch terminator.
continue;

unsigned TargetReg;
Register TargetReg;

switch (TI.getOpcode()) {
default:
@@ -1265,9 +1265,9 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;

SmallSet<unsigned, 16> HardenedAddrRegs;
SmallSet<Register, 16> HardenedAddrRegs;

SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;
SmallDenseMap<Register, Register, 32> AddrRegToHardenedReg;

// Track the set of load-dependent registers through the basic block. Because
// the values of these registers have an existing data dependency on a loaded
@@ -1297,11 +1297,11 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
// FIXME: Do a more careful analysis of x86 to build a conservative
// model here.
if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
return Op.isReg() && LoadDepRegs.test(Op.getReg());
return Op.isReg() && LoadDepRegs.test(Op.getReg().id());
}))
for (MachineOperand &Def : MI.defs())
if (Def.isReg())
LoadDepRegs.set(Def.getReg());
LoadDepRegs.set(Def.getReg().id());

// Both Intel and AMD are guiding that they will change the semantics of
// LFENCE to be a speculation barrier, so if we see an LFENCE, there is
@@ -1333,11 +1333,11 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(

// If we have at least one (non-frame-index, non-RIP) register operand,
// and neither operand is load-dependent, we need to check the load.
unsigned BaseReg = 0, IndexReg = 0;
Register BaseReg, IndexReg;
if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
BaseMO.getReg() != X86::NoRegister)
BaseMO.getReg().isValid())
BaseReg = BaseMO.getReg();
if (IndexMO.getReg() != X86::NoRegister)
if (IndexMO.getReg().isValid())
IndexReg = IndexMO.getReg();

if (!BaseReg && !IndexReg)
@@ -1348,8 +1348,8 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
// needn't check it.
// FIXME: Is this true in the case where we are hardening loads after
// they complete? Unclear, need to investigate.
if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
(IndexReg && LoadDepRegs.test(IndexReg)))
if ((BaseReg && LoadDepRegs.test(BaseReg.id())) ||
(IndexReg && LoadDepRegs.test(IndexReg.id())))
continue;

// If post-load hardening is enabled, this load is compatible with
@@ -1378,7 +1378,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(

for (MachineOperand &Def : MI.defs())
if (Def.isReg())
LoadDepRegs.set(Def.getReg());
LoadDepRegs.set(Def.getReg().id());
}

// Now re-walk the instructions in the basic block, and apply whichever
@@ -1433,7 +1433,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
}
}

unsigned HardenedReg = hardenPostLoad(MI);
Register HardenedReg = hardenPostLoad(MI);

// Mark the resulting hardened register as such so we don't re-harden.
AddrRegToHardenedReg[HardenedReg] = HardenedReg;
@@ -1489,7 +1489,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
/// Note that LLVM can only lower very simple patterns of saved and restored
/// EFLAGS registers. The restore should always be within the same basic block
/// as the save so that no PHI nodes are inserted.
unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
Register X86SpeculativeLoadHardeningPass::saveEFLAGS(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc) {
// FIXME: Hard coding this to a 32-bit register class seems weird, but matches
@@ -1520,7 +1520,7 @@ void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
/// across normal stack adjustments.
void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc, unsigned PredStateReg) {
const DebugLoc &Loc, Register PredStateReg) {
Register TmpReg = MRI->createVirtualRegister(PS->RC);
// FIXME: This hard codes a shift distance based on the number of bits needed
// to stay canonical on 64-bit. We should compute this somehow and support
@@ -1538,7 +1538,7 @@ void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
}

/// Extracts the predicate state stored in the high bits of the stack pointer.
unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
Register X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc) {
Register PredStateReg = MRI->createVirtualRegister(PS->RC);
@@ -1561,7 +1561,7 @@ unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(

void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
MachineBasicBlock &MBB = *MI.getParent();
const DebugLoc &Loc = MI.getDebugLoc();

@@ -1640,7 +1640,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
// If EFLAGS are live and we don't have access to instructions that avoid
// clobbering EFLAGS we need to save and restore them. This in turn makes
// the EFLAGS no longer live.
unsigned FlagsReg = 0;
Register FlagsReg;
if (EFLAGSLive && !Subtarget->hasBMI2()) {
EFLAGSLive = false;
FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
@@ -1898,7 +1898,7 @@ bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
///
/// The new, hardened virtual register is returned. It will have the same
/// register class as `Reg`.
unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
Register X86SpeculativeLoadHardeningPass::hardenValueInRegister(
Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc) {
assert(canHardenRegister(Reg) && "Cannot harden this register!");
@@ -1919,7 +1919,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
StateReg = NarrowStateReg;
}

unsigned FlagsReg = 0;
Register FlagsReg;
if (isEFLAGSLive(MBB, InsertPt, *TRI))
FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);

@@ -1948,7 +1948,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
/// execution and coercing them to one is sufficient.
///
/// Returns the newly hardened register.
unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
Register X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
const DebugLoc &Loc = MI.getDebugLoc();

@@ -1965,7 +1965,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
// Now harden this register's value, getting a hardened reg that is safe to
// use. Note that we insert the instructions to compute this *after* the
// defining instruction, not before it.
unsigned HardenedReg = hardenValueInRegister(
Register HardenedReg = hardenValueInRegister(
UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);

// Finally, replace the old register (which now only has the uses of the
Expand Down Expand Up @@ -2089,7 +2089,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
MI.setPostInstrSymbol(MF, RetSymbol);

const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
unsigned ExpectedRetAddrReg = 0;
Register ExpectedRetAddrReg;

// If we have no red zones or if the function returns twice (possibly without
// using the `ret` instruction) like setjmp, we need to save the expected
@@ -2148,7 +2148,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
}

// Now we extract the callee's predicate state from the stack pointer.
unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
Register NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);

// Test the expected return address against our actual address. If we can
// form this basic block's address as an immediate, this is easy. Otherwise
@@ -2207,7 +2207,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
/// not already flagged and add them.
void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
MachineInstr &MI,
SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
switch (MI.getOpcode()) {
case X86::FARCALL16m:
case X86::FARCALL32m:
@@ -2240,7 +2240,7 @@ void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
// Try to lookup a hardened version of this register. We retain a reference
// here as we want to update the map to track any newly computed hardened
// register.
unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
Register &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];

// If we don't have a hardened register yet, compute one. Otherwise, just use
// the already hardened register.
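
For context on why the unsigned-to-Register swap is NFC: the sketch below is a minimal, self-contained stand-in for llvm::Register, for illustration only (the real class lives in llvm/CodeGen/Register.h). It shows the semantics this change relies on: a default-constructed Register holds the "no register" value 0, isValid() takes over comparisons against X86::NoRegister, and id() exposes the raw unsigned used to index structures such as the LoadDepRegs bit vector. The register number 42 and the std::vector<bool> used below are purely illustrative.

#include <cassert>
#include <vector>

// Simplified stand-in for llvm::Register (illustration only; the real class
// also distinguishes physical from virtual registers, converts to/from
// MCRegister, and plugs into DenseMap).
class Register {
  unsigned Reg = 0; // 0 is the "no register" sentinel, matching X86::NoRegister.

public:
  constexpr Register() = default;
  constexpr Register(unsigned R) : Reg(R) {}

  constexpr bool isValid() const { return Reg != 0; }
  constexpr unsigned id() const { return Reg; }

  // Keeps boolean tests such as `if (!BaseReg && !IndexReg)` working.
  constexpr explicit operator bool() const { return isValid(); }
};

int main() {
  Register FlagsReg;           // replaces `unsigned FlagsReg = 0;`
  assert(!FlagsReg.isValid()); // default-constructed means "no register"

  Register BaseReg(42);               // hypothetical register number
  std::vector<bool> LoadDepRegs(64);  // stands in for the pass's bit vector
  if (BaseReg)                        // was `BaseReg != X86::NoRegister`
    LoadDepRegs[BaseReg.id()] = true; // was `LoadDepRegs.set(BaseReg)`
  assert(LoadDepRegs[42]);
}

In the pass itself the raw index is now spelled out explicitly via id() where a plain unsigned is needed, e.g. LoadDepRegs.set(Def.getReg().id()), which is exactly what the hunks above change.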