-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[CalcSpillWeights] don't mark live intervals with spillable inlineasm ops as having infinite spill weight #70747
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
@llvm/pr-subscribers-llvm-regalloc @llvm/pr-subscribers-llvm-ir Author: Nick Desaulniers (nickdesaulniers) Changes: This is necessary for RegAllocGreedy support for memory folding inline asm that uses "rm" constraints. Thanks to @qcolombet for the suggestion. Link: #20571 Full diff: https://github.com/llvm/llvm-project/pull/70747.diff 5 Files Affected:
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 4877f43e8578d1c..93e8ff389d65673 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1364,6 +1364,9 @@ class MachineInstr
return getOpcode() == TargetOpcode::INLINEASM ||
getOpcode() == TargetOpcode::INLINEASM_BR;
}
+ /// Returns true if the memory operand can be folded. Does so by checking the
+ /// InlineAsm::Flag immediate operand at OpId - 1.
+ bool mayFoldInlineAsmMemOp(unsigned OpId) const;
bool isStackAligningInlineAsm() const;
InlineAsm::AsmDialect getInlineAsmDialect() const;
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 969ad42816a7e52..2d395a53608b0b7 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -291,18 +291,23 @@ class InlineAsm final : public Value {
// Bits 30-16 - A ConstraintCode:: value indicating the original
// constraint code. (MemConstraintCode)
// Else:
- // Bits 30-16 - The register class ID to use for the operand. (RegClass)
+ // Bits 29-16 - The register class ID to use for the operand. (RegClass)
+ // Bit 30 - If the register is permitted to be spilled.
+ // (RegMayBeSpilled)
+ // Defaults to false "r", may be set for constraints like
+ // "rm" (or "g").
//
- // As such, MatchedOperandNo, MemConstraintCode, and RegClass are views of
- // the same slice of bits, but are mutually exclusive depending on the
- // fields IsMatched then KindField.
+ // As such, MatchedOperandNo, MemConstraintCode, and
+ // (RegClass+RegMayBeSpilled) are views of the same slice of bits, but are
+ // mutually exclusive depending on the fields IsMatched then KindField.
class Flag {
uint32_t Storage;
using KindField = Bitfield::Element<Kind, 0, 3, Kind::Func>;
using NumOperands = Bitfield::Element<unsigned, 3, 13>;
using MatchedOperandNo = Bitfield::Element<unsigned, 16, 15>;
using MemConstraintCode = Bitfield::Element<ConstraintCode, 16, 15, ConstraintCode::Max>;
- using RegClass = Bitfield::Element<unsigned, 16, 15>;
+ using RegClass = Bitfield::Element<unsigned, 16, 14>;
+ using RegMayBeSpilled = Bitfield::Element<bool, 30, 1>;
using IsMatched = Bitfield::Element<bool, 31, 1>;
@@ -413,6 +418,26 @@ class InlineAsm final : public Value {
"Flag is not a memory or function constraint!");
Bitfield::set<MemConstraintCode>(Storage, ConstraintCode::Unknown);
}
+
+ /// Set a bit to denote that while this operand is some kind of register
+ /// (use, def, ...), a memory flag did appear in the original constraint
+ /// list. This is set by the instruction selection framework, and consumed
+ /// by the register allocator. While the register allocator is generally
+ /// responsible for spilling registers, we need to be able to distinguish
+ /// between registers that the register allocator has permission to spill
+ /// ("rm") vs ones it does not ("r"). This is because the inline asm may use
+ /// instructions which don't support memory addressing modes for that
+ /// operand.
+ void setRegMayBeSpilled(bool B) {
+ assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
+ "Must be reg");
+ Bitfield::set<RegMayBeSpilled>(Storage, B);
+ }
+ bool getRegMayBeSpilled() const {
+ assert((isRegDefKind() || isRegDefEarlyClobberKind() || isRegUseKind()) &&
+ "Must be reg");
+ return Bitfield::get<RegMayBeSpilled>(Storage);
+ }
};
static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
diff --git a/llvm/lib/CodeGen/CalcSpillWeights.cpp b/llvm/lib/CodeGen/CalcSpillWeights.cpp
index 6e98e2384ef975f..f446e11427e75d4 100644
--- a/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -146,6 +146,17 @@ void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &LI) {
LI.setWeight(Weight);
}
+static bool canMemFoldInlineAsm(LiveInterval &LI,
+ const MachineRegisterInfo &MRI) {
+ for (const MachineOperand &MO : MRI.reg_operands(LI.reg())) {
+ const MachineInstr *MI = MO.getParent();
+ if (MI->isInlineAsm() && MI->mayFoldInlineAsmMemOp(MI->getOperandNo(&MO)))
+ return true;
+ }
+
+ return false;
+}
+
float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
SlotIndex *End) {
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -315,7 +326,7 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
// into instruction itself makes perfect sense.
if (ShouldUpdateLI && LI.isZeroLength(LIS.getSlotIndexes()) &&
!LI.isLiveAtIndexes(LIS.getRegMaskSlots()) &&
- !isLiveAtStatepointVarArg(LI)) {
+ !isLiveAtStatepointVarArg(LI) && !canMemFoldInlineAsm(LI, MRI)) {
LI.markNotSpillable();
return -1.0;
}
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index 048563cc2bcc4e4..92c789e85a205b4 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1792,6 +1792,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (F.isUseOperandTiedToDef(TiedTo))
OS << " tiedto:$" << TiedTo;
+ if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
+ F.isRegUseKind()) &&
+ F.getRegMayBeSpilled()) {
+ OS << " spillable";
+ }
+
OS << ']';
// Compute the index of the next operand descriptor.
@@ -2526,3 +2532,20 @@ void MachineInstr::insert(mop_iterator InsertBefore,
tieOperands(Tie1, Tie2);
}
}
+
+bool MachineInstr::mayFoldInlineAsmMemOp(unsigned OpId) const {
+ assert(OpId && "expected non-zero operand id");
+ assert(isInlineAsm() && "should only be used on inline asm");
+
+ if (!getOperand(OpId).isReg())
+ return false;
+
+ const MachineOperand &MD = getOperand(OpId - 1);
+ if (!MD.isImm())
+ return false;
+
+ InlineAsm::Flag F(MD.getImm());
+ if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
+ return F.getRegMayBeSpilled();
+ return false;
+}
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index fe7efb73a2dce83..bcf9105ea64ca96 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1639,6 +1639,10 @@ std::string TargetInstrInfo::createMIROperandComment(
if (F.isUseOperandTiedToDef(TiedTo))
OS << " tiedto:$" << TiedTo;
+ if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
+ F.getRegMayBeSpilled())
+ OS << " spillable";
+
return OS.str();
}
Note to reviewers; this is stacked on top of #70738. As such, keeping it a draft until that has landed.
for (const MachineOperand &MO : MRI.reg_operands(LI.reg())) {
  const MachineInstr *MI = MO.getParent();
  if (MI->isInlineAsm() && MI->mayFoldInlineAsmMemOp(MI->getOperandNo(&MO)))
    return true;
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What happens if a register is both foldable and not foldable?
I.e., something like asm "rm, r", %0, %0.
Given the second constraint forces %0 to stay in a register, we can't fold, right?
In other words, depending on the semantic that we want, we may want to check all the operands.
Now, I still approved this PR because although the resulting LI
won't actually be spillable in this case, regalloc will still handle it correctly.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What happens if a register is both foldable and not foldable?
I.e., something like asm "rm, r", %0, %0.
I'm not sure that can be expressed from inline asm.
Do you mean something like:
asm ("# %0 %1"::"rm"(x), "r"(x));?
In that case, %0 would have a distinct storage location from %1. x would be copied into the inline asm twice. %0's constraint is "rm" (register OR memory), %1's constraint is "r" (register). I don't think AND NOT is expressible via the constraint language.
… ops as having infinite spill weight. This is necessary for RegAllocGreedy support for memory folding inline asm that uses "rm" constraints. Thanks to @qcolombet for the suggestion. Link: llvm#20571
7fb724f
to
fc89372
Compare
rebased on top of 778a484. Will land after another green presubmit run.
This is necessary for RegAllocGreedy support for memory folding inline
asm that uses "rm" constraints.
Thanks to @qcolombet for the suggestion.
Link: #20571