[AMDGPU] Speed up SIRegisterInfo::getReservedRegs #79844
Conversation
reserveRegisterTuples is slow because it uses MCRegAliasIterator and hence ends up reserving the same aliased registers many times. This patch changes getReservedRegs not to use it for reserving SGPRs, VGPRs and AGPRs. Instead it iterates through base register classes, which should come closer to reserving each register once only. Overall this reduces the time to run check-llvm-codegen-amdgpu in my Release build from 18.4 seconds to 16.9 seconds (all timings ±0.2 seconds).
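For context, a minimal sketch of the pattern being replaced (hedged: it assumes the helper simply expands aliases into the reserved set; the real SIRegisterInfo::reserveRegisterTuples may differ in detail, and the helper name here is illustrative):

#include "llvm/ADT/BitVector.h"
#include "llvm/MC/MCRegisterInfo.h"
using namespace llvm;

// Every call walks all aliases of Reg via MCRegAliasIterator, so wide,
// overlapping tuples set the same BitVector bits many times over. The patch
// replaces this per-register alias expansion with a single pass over each
// base register class.
static void reserveAllAliases(BitVector &Reserved, MCRegister Reg,
                              const MCRegisterInfo &MRI) {
  for (MCRegAliasIterator R(Reg, &MRI, /*IncludeSelf=*/true); R.isValid(); ++R)
    Reserved.set(*R);
}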
@llvm/pr-subscribers-backend-amdgpu Author: Jay Foad (jayfoad). Full diff: https://github.com/llvm/llvm-project/pull/79844.diff 5 Files Affected:
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 5a6da209e8da686..75c482d7bcaa92a 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -119,6 +119,9 @@ class TargetRegisterClass {
/// registers.
bool isAllocatable() const { return MC->isAllocatable(); }
+ /// Return true if this register class has a defined BaseClassOrder.
+ bool isBaseClass() const { return MC->isBaseClass(); }
+
/// Return true if the specified TargetRegisterClass
/// is a proper sub-class of this TargetRegisterClass.
bool hasSubClass(const TargetRegisterClass *RC) const {
diff --git a/llvm/include/llvm/MC/MCRegisterInfo.h b/llvm/include/llvm/MC/MCRegisterInfo.h
index ede01d624924622..e52f0a4ff86edfd 100644
--- a/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -46,6 +46,7 @@ class MCRegisterClass {
const uint16_t RegSizeInBits;
const int8_t CopyCost;
const bool Allocatable;
+ const bool BaseClass;
/// getID() - Return the register class ID number.
///
@@ -97,6 +98,9 @@ class MCRegisterClass {
/// isAllocatable - Return true if this register class may be used to create
/// virtual registers.
bool isAllocatable() const { return Allocatable; }
+
+ /// Return true if this register class has a defined BaseClassOrder.
+ bool isBaseClass() const { return BaseClass; }
};
/// MCRegisterDesc - This record contains information about a particular
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 8f9d95c6d4b1421..1a22b777aeda86d 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -622,9 +622,15 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
//
unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
- for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
- unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
- reserveRegisterTuples(Reserved, Reg);
+ for (const TargetRegisterClass *RC : regclasses()) {
+ if (RC->isBaseClass() && isSGPRClass(RC)) {
+ unsigned NumRegs = divideCeil(getRegSizeInBits(*RC), 32);
+ for (MCPhysReg Reg : *RC) {
+ unsigned Index = getHWRegIndex(Reg);
+ if (Index + NumRegs > MaxNumSGPRs && Index < TotalNumSGPRs)
+ Reserved.set(Reg);
+ }
+ }
}
Register ScratchRSrcReg = MFI->getScratchRSrcReg();
@@ -693,20 +699,29 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
}
}
- for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
- unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
- reserveRegisterTuples(Reserved, Reg);
+ for (const TargetRegisterClass *RC : regclasses()) {
+ if (RC->isBaseClass() && isVGPRClass(RC)) {
+ unsigned NumRegs = divideCeil(getRegSizeInBits(*RC), 32);
+ for (MCPhysReg Reg : *RC) {
+ unsigned Index = getHWRegIndex(Reg);
+ if (Index + NumRegs > MaxNumVGPRs)
+ Reserved.set(Reg);
+ }
+ }
}
- if (ST.hasMAIInsts()) {
- for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) {
- unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
- reserveRegisterTuples(Reserved, Reg);
+ // Reserve all the AGPRs if there are no instructions to use it.
+ if (!ST.hasMAIInsts())
+ MaxNumAGPRs = 0;
+ for (const TargetRegisterClass *RC : regclasses()) {
+ if (RC->isBaseClass() && isAGPRClass(RC)) {
+ unsigned NumRegs = divideCeil(getRegSizeInBits(*RC), 32);
+ for (MCPhysReg Reg : *RC) {
+ unsigned Index = getHWRegIndex(Reg);
+ if (Index + NumRegs > MaxNumAGPRs)
+ Reserved.set(Reg);
+ }
}
- } else {
- // Reserve all the AGPRs if there are no instructions to use it.
- for (MCRegister Reg : AMDGPU::AGPR_32RegClass)
- reserveRegisterTuples(Reserved, Reg);
}
// On GFX908, in order to guarantee copying between AGPRs, we need a scratch
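To make the new test concrete, here is an illustrative example (taking a 64-bit VGPR pair from a base class): with MaxNumVGPRs = 24, the tuple v[23:24] has Index = 23 and NumRegs = 2, so 23 + 2 > 24 and it is reserved, while v[22:23] (22 + 2 <= 24) stays allocatable. Each base class is walked exactly once with no alias expansion, and setting MaxNumAGPRs to 0 when MAI instructions are absent lets the same loop reserve every AGPR.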
diff --git a/llvm/unittests/CodeGen/MachineInstrTest.cpp b/llvm/unittests/CodeGen/MachineInstrTest.cpp
index 7c45d9784bb8e7a..49da0c38eefdca5 100644
--- a/llvm/unittests/CodeGen/MachineInstrTest.cpp
+++ b/llvm/unittests/CodeGen/MachineInstrTest.cpp
@@ -530,7 +530,8 @@ TEST(MachineInstrTest, SpliceOperands) {
EXPECT_EQ(MI->getOperand(8).getImm(), MachineOperand::CreateImm(4).getImm());
// test tied operands
- MCRegisterClass MRC{0, 0, 0, 0, 0, 0, 0, 0, /*Allocatable=*/true};
+ MCRegisterClass MRC{
+ 0, 0, 0, 0, 0, 0, 0, 0, /*Allocatable=*/true, /*BaseClass=*/true};
TargetRegisterClass RC{&MRC, 0, 0, {}, 0, 0, 0, 0, 0, 0, 0};
// MachineRegisterInfo will be very upset if these registers aren't
// allocatable.
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index bed4b27d2782ed9..cff97771b7a87b8 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -1069,7 +1069,8 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
<< RegClassStrings.get(RC.getName()) << ", " << RC.getOrder().size()
<< ", " << RCBitsSize << ", " << RC.getQualifiedIdName() << ", "
<< RegSize << ", " << RC.CopyCost << ", "
- << (RC.Allocatable ? "true" : "false") << " },\n";
+ << (RC.Allocatable ? "true" : "false") << ", "
+ << (RC.getBaseClassOrder() ? "true" : "false") << " },\n";
}
OS << "};\n\n";
@@ -1846,6 +1847,7 @@ void RegisterInfoEmitter::debugDump(raw_ostream &OS) {
OS << "\tCoveredBySubRegs: " << RC.CoveredBySubRegs << '\n';
OS << "\tAllocatable: " << RC.Allocatable << '\n';
OS << "\tAllocationPriority: " << unsigned(RC.AllocationPriority) << '\n';
+ OS << "\tBaseClassOrder: " << RC.getBaseClassOrder() << '\n';
OS << "\tRegs:";
for (const CodeGenRegister *R : RC.getMembers()) {
OS << " " << R->getName();
for (const TargetRegisterClass *RC : regclasses()) {
  if (RC->isBaseClass() && isSGPRClass(RC)) {
    unsigned NumRegs = divideCeil(getRegSizeInBits(*RC), 32);
    for (MCPhysReg Reg : *RC) {
I think you can get away with drop_front. Plus I still think we should move reserved regs to be stored as reserved reg units
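A rough sketch of what the drop_front idea might look like, slotted in place of the inner loop above (untested; it assumes each base class enumerates its registers in increasing HW-index order so the registers to reserve form a contiguous suffix, and it elides the extra Index < TotalNumSGPRs cap the SGPR loop applies; the other suggestion, storing reserved registers as register units, is a separate change not sketched here):

// Hedged sketch: RC, NumRegs, MaxNumSGPRs and Reserved come from the
// surrounding loop in the patch. Binary-search for the first register whose
// tuple crosses the limit, then reserve the suffix without testing each one.
ArrayRef<MCPhysReg> Regs(RC->begin(), RC->getNumRegs());
const MCPhysReg *FirstReserved =
    llvm::lower_bound(Regs, MaxNumSGPRs, [&](MCPhysReg Reg, unsigned Max) {
      return getHWRegIndex(Reg) + NumRegs <= Max;
    });
for (MCPhysReg Reg : Regs.drop_front(FirstReserved - Regs.begin()))
  Reserved.set(Reg);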