[AArch64][SME2] Improve register allocation of multi-vector SME intrinsics #116399

Merged: 12 commits, Dec 12, 2024
Changes from 7 commits
34 changes: 34 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -67,6 +67,10 @@ class AArch64ExpandPseudo : public MachineFunctionPass {
TargetRegisterClass ContiguousClass,
TargetRegisterClass StridedClass,
unsigned ContiguousOpc, unsigned StridedOpc);
bool expandFormTuplePseudo(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI,
unsigned Size);
bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned BitSize);

@@ -1142,6 +1146,32 @@ bool AArch64ExpandPseudo::expandMultiVecPseudo(
return true;
}

bool AArch64ExpandPseudo::expandFormTuplePseudo(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI, unsigned Size) {
assert((Size == 2 || Size == 4) && "Invalid Tuple Size");
MachineInstr &MI = *MBBI;
Register ReturnTuple = MI.getOperand(0).getReg();

const TargetRegisterInfo *TRI =
MBB.getParent()->getSubtarget().getRegisterInfo();
for (unsigned i = 0; i < Size; i++) {
Collaborator:

nit:

Suggested change:
- for (unsigned i = 0; i < Size; i++) {
+ for (unsigned I = 0; I < Size; ++I) {

Register FormTupleOpReg = MI.getOperand(i + 1).getReg();
Register ReturnTupleSubReg =
TRI->getSubReg(ReturnTuple, AArch64::zsub0 + i);
// Add copies to ensure the subregisters remain in the correct order
// for any contiguous operation they are used by.
if (FormTupleOpReg != ReturnTupleSubReg)
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORR_ZZZ))
.addReg(ReturnTupleSubReg, RegState::Define)
.addReg(FormTupleOpReg)
.addReg(FormTupleOpReg);
}

MI.eraseFromParent();
return true;
}
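
For illustration, a rough before/after sketch of this expansion for the X2 pseudo at the MIR level; the physical registers are made up for the example and are not taken from a real test:

$z0_z1 = FORM_STRIDED_TUPLE_X2_PSEUDO $z16, $z17
; expands to (ORR_ZZZ with both sources equal is a plain vector move):
$z0 = ORR_ZZZ $z16, $z16
$z1 = ORR_ZZZ $z17, $z17

If an input already sits in the matching subregister of the destination tuple, the corresponding copy is skipped.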

/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
@@ -1724,6 +1754,10 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
return expandMultiVecPseudo(
MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
case AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO:
return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 2);
case AArch64::FORM_STRIDED_TUPLE_X4_PSEUDO:
return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 4);
}
return false;
}
77 changes: 77 additions & 0 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7639,6 +7639,11 @@ static unsigned getIntrinsicID(const SDNode *N) {
return IID;
return Intrinsic::not_intrinsic;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IID = N->getConstantOperandVal(1);
if (IID < Intrinsic::num_intrinsics)
return IID;
return Intrinsic::not_intrinsic;
}
Collaborator:

can be removed?

}
}

@@ -8641,6 +8646,55 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
return ZExtBool;
}

bool shouldUseFormStridedPseudo(MachineInstr &MI) {
MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
bool UseFormStrided = false;
unsigned NumOperands =
MI.getOpcode() == AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO ? 2 : 4;

// The FORM_STRIDED_TUPLE pseudo should only be used if the input operands
// are copy nodes where the source register is in a StridedOrContiguous
// class. For example:
// %3:zpr2stridedorcontiguous = LD1B_2Z_IMM_PSEUDO ..
// %4:zpr = COPY %3.zsub1:zpr2stridedorcontiguous
// %5:zpr = COPY %3.zsub0:zpr2stridedorcontiguous
// %6:zpr2stridedorcontiguous = LD1B_2Z_PSEUDO ..
// %7:zpr = COPY %6.zsub1:zpr2stridedorcontiguous
// %8:zpr = COPY %6.zsub0:zpr2stridedorcontiguous
// %9:zpr2mul2 = FORM_STRIDED_TUPLE_X2_PSEUDO %5:zpr, %8:zpr

MCRegister SubReg = MCRegister::NoRegister;
for (unsigned I = 1; I < MI.getNumOperands(); ++I) {
MachineOperand &MO = MI.getOperand(I);
assert(MO.isReg() && "Unexpected operand to FORM_STRIDED_TUPLE");

MachineOperand *Def = MRI.getOneDef(MO.getReg());
if (!Def || !Def->isReg() || !Def->getParent()->isCopy()) {
UseFormStrided = false;
break;
}

MachineOperand CpyOp = Def->getParent()->getOperand(1);
MachineOperand *Ld = MRI.getOneDef(CpyOp.getReg());
unsigned OpSubReg = CpyOp.getSubReg();
if (SubReg == MCRegister::NoRegister)
SubReg = OpSubReg;
if (!Ld || !Ld->isReg() || OpSubReg != SubReg) {
UseFormStrided = false;
break;
}
Collaborator:

This can return false directly.


const TargetRegisterClass *RegClass =
NumOperands == 2 ? &AArch64::ZPR2StridedOrContiguousRegClass
: &AArch64::ZPR4StridedOrContiguousRegClass;

if (MRI.getRegClass(Ld->getReg()) == RegClass)
UseFormStrided = true;
Collaborator:

Suggested change:
- if (MRI.getRegClass(Ld->getReg()) == RegClass)
-   UseFormStrided = true;
+ if (MRI.getRegClass(Ld->getReg()) != RegClass)
+   return false;

}

return UseFormStrided;
}
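
Taken together, the review suggestions above replace the UseFormStrided flag with early returns. A possible shape of the operand checks after that change, as a sketch only (the committed form may differ):

  // Sketch: early-return form of the checks, with RegClass hoisted out of
  // the loop. Assumes the same MRI and NumOperands setup as above.
  const TargetRegisterClass *RegClass =
      NumOperands == 2 ? &AArch64::ZPR2StridedOrContiguousRegClass
                       : &AArch64::ZPR4StridedOrContiguousRegClass;
  MCRegister SubReg = MCRegister::NoRegister;
  for (unsigned I = 1; I < MI.getNumOperands(); ++I) {
    MachineOperand &MO = MI.getOperand(I);
    assert(MO.isReg() && "Unexpected operand to FORM_STRIDED_TUPLE");

    // Each input must be a COPY of a subregister of a single def.
    MachineOperand *Def = MRI.getOneDef(MO.getReg());
    if (!Def || !Def->isReg() || !Def->getParent()->isCopy())
      return false;

    // Every input must copy the same subregister index (zsub0, zsub1, ...).
    MachineOperand &CpyOp = Def->getParent()->getOperand(1);
    MachineOperand *Ld = MRI.getOneDef(CpyOp.getReg());
    if (SubReg == MCRegister::NoRegister)
      SubReg = CpyOp.getSubReg();
    if (!Ld || !Ld->isReg() || CpyOp.getSubReg() != SubReg)
      return false;

    // The copied-from value must live in a StridedOrContiguous class.
    if (MRI.getRegClass(Ld->getReg()) != RegClass)
      return false;
  }
  return true;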

void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const {
// Live-in physreg copies that are glued to SMSTART are applied as
Collaborator:

Rather than continue, it's better to break instead, because if one of them is not a COPY then we don't need to process the other operands.

@@ -8666,6 +8720,29 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
}
}

if (MI.getOpcode() == AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO ||
MI.getOpcode() == AArch64::FORM_STRIDED_TUPLE_X4_PSEUDO) {
// If input values to the FORM_STRIDED_TUPLE pseudo aren't copies from a
// StridedOrContiguous class, fall back on REG_SEQUENCE node.
if (!shouldUseFormStridedPseudo(MI)) {
static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
AArch64::zsub2, AArch64::zsub3};

const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
TII->get(TargetOpcode::REG_SEQUENCE),
MI.getOperand(0).getReg());

for (unsigned I = 1; I < MI.getNumOperands(); ++I) {
MIB.add(MI.getOperand(I));
MIB.addImm(SubRegs[I - 1]);
Collaborator:

nit:

Suggested change:
- MIB.addImm(SubRegs[I - 1]);
+ MIB.addImm(AArch64::zsub0 + (I - 1));

Then you can remove SubRegs[].

}

MI.eraseFromParent();
}
return;
}
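
At the MIR level the fallback amounts to the following, sketched with illustrative virtual register numbers:

; Inputs are not copies from a StridedOrContiguous class, so instead of
%9:zpr2mul2 = FORM_STRIDED_TUPLE_X2_PSEUDO %5:zpr, %8:zpr
; the pseudo is rewritten post-ISel into an ordinary tuple build:
%9:zpr2mul2 = REG_SEQUENCE %5:zpr, %subreg.zsub0, %8:zpr, %subreg.zsub1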

// Add an implicit use of 'VG' for ADDXri/SUBXri, which are instructions that
// have nothing to do with VG, were it not that they are used to materialise a
// frame-address. If they contain a frame-index to a scalable vector, this
63 changes: 63 additions & 0 deletions llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1107,6 +1107,69 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}

// FORM_STRIDED_TUPLE nodes are created to improve register allocation where
// a consecutive multi-vector tuple is constructed from the same indices of
// multiple strided loads. This may still result in unnecessary copies between
// the loads and the tuple. Here we try to return a hint to assign the
// contiguous ZPRMulReg starting at the same register as the first operand of
// the pseudo, which should be a subregister of the first strided load.
//
// For example, if the first strided load has been assigned $z16_z20_z24_z28
// and the operands of the pseudo are each accessing subregister zsub2, we
// should look through through Order to find a contiguous register which
// begins with $z24 (i.e. $z24_z25_z26_z27).
//
bool AArch64RegisterInfo::getRegAllocationHints(
Register VirtReg, ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
Collaborator:

Can be removed, because in AArch64RegisterInfo this == TRI.

const MachineRegisterInfo &MRI = MF.getRegInfo();
bool DefaultHints =
TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
Collaborator:

We should prioritise the tuples as added below over any generic hints. That means this function should be called last.

Contributor Author:

Moved the TargetRegisterInfo::getRegAllocationHints call to the end of this function.


unsigned RegID = MRI.getRegClass(VirtReg)->getID();
if (RegID != AArch64::ZPR2Mul2RegClassID &&
RegID != AArch64::ZPR4Mul4RegClassID)
return DefaultHints;

Collaborator:

I think this condition can be removed (it would e.g. be equally valid for other register classes with contiguous tuples, or perhaps just any register class?).

for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
if (MI.getOpcode() != AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO &&
MI.getOpcode() != AArch64::FORM_STRIDED_TUPLE_X4_PSEUDO)
continue;
Collaborator:

nit: I wonder if we just want to bail out early at this point?


// Look up the physical register mapped to the first load of the pseudo.
Register FirstLoadVirtReg = MI.getOperand(1).getReg();
Collaborator:

Please don't name variables with the assumption that these result from certain operations, like Loads.

Contributor Author:

Renamed this to FirstOpVirtReg.

if (!VRM->hasPhys(FirstLoadVirtReg))
continue;

int64_t SubRegIdx = -1;
MCRegister FirstLoadPhysReg = VRM->getPhys(FirstLoadVirtReg);

// The subreg number is used to access the correct unit of the
// strided register found in the map above.
SubRegIdx = MI.getOperand(1).getSubReg() - AArch64::zsub0;
if (SubRegIdx < 0 || SubRegIdx > 3)
continue;
Collaborator:

Please use a switch statement, such that explicitly only zsub0..zsub3 are supported.

Contributor Author:

After the changes suggested below to use getSubReg instead of iterating through MCRegUnits, SubRegIdx is no longer required. However, I've added the switch statement back in anyway to make sure only zsub0-zsub3 are supported.


SmallVector<Register, 4> RegUnits;
for (MCRegUnit Unit : TRI->regunits(FirstLoadPhysReg))
RegUnits.push_back(Unit);

// Find the contiguous ZPRMul register which starts with the
// same register unit as the strided register and add to Hints.
Register StartReg = RegUnits[SubRegIdx];
for (unsigned I = 0; I < Order.size(); ++I) {
Register Reg = *TRI->regunits(Order[I]).begin();
if (Reg == StartReg)
Hints.push_back(Order[I]);
}
Collaborator:

There is no need to iterate through all MCRegUnits for this register. It also feels rather fiddly to index into RegUnits[SubRegIdx], because it makes assumptions on the order of register units in RegUnits.

You can do this instead using getSubReg, e.g.

MCRegister TupleStartReg = getSubReg(VRM->getPhys(FirstLoadVirtReg), MI.getOperand(1).getSubReg());
for (unsigned I = 0; I < Order.size(); ++I)
  if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
    if (R == TupleStartReg)
      ...

}

return DefaultHints;
}
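
Combining the suggestions above (drop the redundant TRI, call the default hints last, and use getSubReg rather than walking register units), the core of the hinting loop could end up roughly like this; a sketch under those assumptions, using the renamed FirstOpVirtReg, not the committed code:

  // Sketch: hint the contiguous tuple that starts at the physical register
  // already assigned to the subregister used by the pseudo's first operand.
  MCRegister TupleStartReg =
      getSubReg(VRM->getPhys(FirstOpVirtReg), MI.getOperand(1).getSubReg());
  for (MCPhysReg PhysReg : Order)
    if (getSubReg(PhysReg, AArch64::zsub0) == TupleStartReg)
      Hints.push_back(PhysReg);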

unsigned AArch64RegisterInfo::getLocalAddressRegister(
const MachineFunction &MF) const {
const auto &MFI = MF.getFrameInfo();
5 changes: 5 additions & 0 deletions llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -134,6 +134,11 @@ class AArch64RegisterInfo final : public AArch64GenRegisterInfo {
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const override;

bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF, const VirtRegMap *VRM,
const LiveRegMatrix *Matrix) const override;

unsigned getLocalAddressRegister(const MachineFunction &MF) const;
bool regNeedsCFI(unsigned Reg, unsigned &RegToUseForCFI) const;

18 changes: 16 additions & 2 deletions llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -34,6 +34,20 @@ def tileslicerange0s4 : ComplexPattern<i32, 2, "SelectSMETileSlice<0, 4>", []>;

def am_sme_indexed_b4 :ComplexPattern<iPTR, 2, "SelectAddrModeIndexedSVE<0,15>", [], [SDNPWantRoot]>;

def FORM_STRIDED_TUPLE_X2_PSEUDO :
Pseudo<(outs ZPR2Mul2:$tup),
(ins ZPR:$zn0, ZPR:$zn1), []>, Sched<[]> {
let hasSideEffects = 0;
let hasPostISelHook = 1;
}

def FORM_STRIDED_TUPLE_X4_PSEUDO :
Pseudo<(outs ZPR4Mul4:$tup),
(ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>, Sched<[]> {
let hasSideEffects = 0;
let hasPostISelHook = 1;
}

def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>;
def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore,
[SDNPHasChain, SDNPSideEffect, SDNPMayLoad]>;
@@ -172,14 +186,14 @@ class SME2_ZA_TwoOp_VG2_Multi_Index_Pat<string name, SDPatternOperator intrinsic,
Operand imm_ty, ComplexPattern tileslice>
: Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)), vt:$Zn1, vt:$Zn2, vt:$Zm, (i32 imm_ty:$i)),
(!cast<Instruction>(name # _PSEUDO) $base, $offset,
(REG_SEQUENCE ZPR2Mul2, vt:$Zn1, zsub0, vt:$Zn2, zsub1), zpr_ty:$Zm, imm_ty:$i)>;
(FORM_STRIDED_TUPLE_X2_PSEUDO vt:$Zn1, vt:$Zn2), zpr_ty:$Zm, imm_ty:$i)>;
Collaborator:

Rather than creating new patterns, can we just update the existing ones? Then maybe other instructions (that use the same pattern class) could also benefit.

Contributor Author:

There are a number of other intrinsics which use these patterns other than sdot/udot, which is why I initially added a new pattern. However, since we will fall back on REG_SEQUENCE anyway if the expected copy instructions are not found, I think we can just update the existing one.


class SME2_ZA_TwoOp_VG4_Multi_Index_Pat<string name, SDPatternOperator intrinsic, Operand index_ty, ZPRRegOp zpr_ty, ValueType vt,
Operand imm_ty, ComplexPattern tileslice>
: Pat<(intrinsic (i32 (tileslice MatrixIndexGPR32Op8_11:$base, index_ty:$offset)),
vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4, vt:$Zm, (i32 imm_ty:$i)),
(!cast<Instruction>(name # _PSEUDO) $base, $offset,
(REG_SEQUENCE ZPR4Mul4, vt:$Zn1, zsub0, vt:$Zn2, zsub1, vt:$Zn3, zsub2, vt:$Zn4, zsub3),
(FORM_STRIDED_TUPLE_X4_PSEUDO vt:$Zn1, vt:$Zn2, vt:$Zn3, vt:$Zn4),
zpr_ty:$Zm, imm_ty:$i)>;
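
The practical effect of swapping REG_SEQUENCE for the pseudo in these two patterns, sketched with made-up registers for the VG2 case:

; Before this change, selection built the tuple directly:
%t:zpr2mul2 = REG_SEQUENCE %a:zpr, %subreg.zsub0, %b:zpr, %subreg.zsub1
; After it, selection emits the pseudo and defers the decision to the
; post-ISel hook, which keeps it only when the inputs come from strided loads:
%t:zpr2mul2 = FORM_STRIDED_TUPLE_X2_PSEUDO %a:zpr, %b:zpr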

class SME2_Sat_Shift_VG2_Pat<string name, SDPatternOperator intrinsic, ValueType out_vt, ValueType in_vt, Operand imm_ty>