[CodeGen] Use LocationSize for MMO getSize #84751

Merged (1 commit) on Mar 17, 2024
11 changes: 9 additions & 2 deletions llvm/include/llvm/Analysis/MemoryLocation.h
@@ -191,8 +191,14 @@ class LocationSize {
return Value == Other.Value;
}

bool operator==(const TypeSize &Other) const {
return hasValue() && getValue() == Other;
}

bool operator!=(const LocationSize &Other) const { return !(*this == Other); }

bool operator!=(const TypeSize &Other) const { return !(*this == Other); }

// Ordering operators are not provided, since it's unclear if there's only one
// reasonable way to compare:
// - values that don't exist against values that do, and
@@ -293,8 +299,9 @@ class MemoryLocation {

// Return the exact size if the exact size is known at compiletime,
// otherwise return MemoryLocation::UnknownSize.
static uint64_t getSizeOrUnknown(const TypeSize &T) {
return T.isScalable() ? UnknownSize : T.getFixedValue();
static LocationSize getSizeOrUnknown(const TypeSize &T) {
return T.isScalable() ? LocationSize::beforeOrAfterPointer()
: LocationSize::precise(T.getFixedValue());
}
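
A hedged usage sketch (not part of the diff; the TypeSize values are made up) of what the new return type gives callers for fixed versus scalable sizes:

```cpp
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Support/TypeSize.h"
using namespace llvm;

// Fixed 16-byte access: a precise LocationSize.
LocationSize A = MemoryLocation::getSizeOrUnknown(TypeSize::getFixed(16));
// A.hasValue() == true, A.getValue() == 16

// Scalable access (e.g. a <vscale x 4 x i32> load): conservatively unknown.
LocationSize B = MemoryLocation::getSizeOrUnknown(TypeSize::getScalable(16));
// B.hasValue() == false, i.e. B == LocationSize::beforeOrAfterPointer()
```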

MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}
@@ -647,15 +647,15 @@ bool GIMatchTableExecutor::executeMatchTable(

unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
Contributor:
should Size not be unsigned here?

Collaborator (Author):
Do you mean it should be a TypeSize? I think this is a bit of code that is likely to need to change as GISel learns about scalable vectors (given the >=/<= comparisons). I think most of GISel should assume that memory sizes cannot be unknown for these nodes.
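
To make that concrete, here is a minimal sketch (not part of the patch; the helper name is invented) of how a matcher-style size check can defend against unknown or scalable sizes now that getSizeInBits() returns a LocationSize rather than a raw integer:

```cpp
#include "llvm/CodeGen/MachineMemOperand.h"

// Returns true only when the MMO describes a known, fixed-size access of
// exactly SizeInBits bits; unknown or scalable sizes count as a mismatch.
static bool memSizeEqualsBits(const llvm::MachineMemOperand &MMO,
                              unsigned SizeInBits) {
  llvm::LocationSize MemSize = MMO.getSizeInBits();
  if (!MemSize.hasValue() || MemSize.isScalable())
    return false;
  return MemSize.getValue() == SizeInBits;
}
```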

if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
MMO->getSizeInBits() != Size) {
MMO->getSizeInBits().getValue() != Size) {
if (handleReject() == RejectAndGiveUp)
return false;
} else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
MMO->getSizeInBits() >= Size) {
MMO->getSizeInBits().getValue() >= Size) {
if (handleReject() == RejectAndGiveUp)
return false;
} else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
MMO->getSizeInBits() <= Size)
MMO->getSizeInBits().getValue() <= Size)
if (handleReject() == RejectAndGiveUp)
return false;

4 changes: 2 additions & 2 deletions llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -54,9 +54,9 @@ class GMemOperation : public GenericMachineInstr {
bool isUnordered() const { return getMMO().isUnordered(); }

/// Returns the size in bytes of the memory access.
uint64_t getMemSize() const { return getMMO().getSize(); }
LocationSize getMemSize() const { return getMMO().getSize(); }
/// Returns the size in bits of the memory access.
uint64_t getMemSizeInBits() const { return getMMO().getSizeInBits(); }
LocationSize getMemSizeInBits() const { return getMMO().getSizeInBits(); }

static bool classof(const MachineInstr *MI) {
return GenericMachineInstr::classof(MI) && MI->hasOneMemOperand();
35 changes: 28 additions & 7 deletions llvm/include/llvm/CodeGen/MachineFunction.h
@@ -1026,18 +1026,27 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
/// MachineMemOperands are owned by the MachineFunction and need not be
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

MachineMemOperand *getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, LocationSize Size,
Align BaseAlignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
MachineMemOperand *getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, uint64_t Size,
Align BaseAlignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic) {
return getMachineMemOperand(PtrInfo, F, LocationSize::precise(Size),
BaseAlignment, AAInfo, Ranges, SSID, Ordering,
FailureOrdering);
}

/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
@@ -1046,9 +1055,16 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, LLT Ty);
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
int64_t Offset, LocationSize Size) {
return getMachineMemOperand(
MMO, Offset, Size == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * Size));
MMO, Offset,
!Size.hasValue() || Size.isScalable()
Contributor:
Can this just assert Size.hasValue()? I would assume that if you have the unknown case, you would have been using the LLT constructor in the first place.

Collaborator (Author):
I think quite a lot of things will construct an MMO with an unknown LocationSize, either directly or from the size in another MMO that was already unknown.

~UINT64_C(0) and MemoryLocation::UnknownSize were used a lot in the past; they have all been changed to LocationSize::beforeOrAfterPointer() as the unknown size.
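
A rough before/after sketch of that migration (OldQuery and NewQuery are hypothetical call sites; the LocationSize API is the one used in this patch):

```cpp
#include "llvm/Analysis/MemoryLocation.h"
#include <cstdint>

// Old idiom: "unknown" travelled as the magic value ~UINT64_C(0)
// (a.k.a. MemoryLocation::UnknownSize).
uint64_t OldQuery(uint64_t Size) {
  return Size == ~UINT64_C(0) ? 0 : Size;
}

// New idiom: unknownness (and scalability) is explicit in the type.
uint64_t NewQuery(llvm::LocationSize Size) {
  if (!Size.hasValue() || Size.isScalable())
    return 0; // unknown or scalable: no fixed byte count to report
  return Size.getValue();
}
```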

? LLT()
: LLT::scalar(8 * Size.getValue().getKnownMinValue()));
}
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
return getMachineMemOperand(MMO, Offset, LocationSize::precise(Size));
}

/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
@@ -1057,10 +1073,15 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
/// explicitly deallocated.
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
const MachinePointerInfo &PtrInfo,
uint64_t Size);
LocationSize Size);
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
const MachinePointerInfo &PtrInfo,
LLT Ty);
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
const MachinePointerInfo &PtrInfo,
uint64_t Size) {
return getMachineMemOperand(MMO, PtrInfo, LocationSize::precise(Size));
}

/// Allocate a new MachineMemOperand by copying an existing one,
/// replacing only AliasAnalysis information. MachineMemOperands are owned
15 changes: 10 additions & 5 deletions llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -17,6 +17,7 @@

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/DerivedTypes.h"
@@ -186,7 +187,7 @@ class MachineMemOperand {
/// and atomic ordering requirements must also be specified. For cmpxchg
/// atomic operations the atomic ordering requirements when store does not
/// occur must also be specified.
MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LocationSize TS,
Align a, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
SyncScope::ID SSID = SyncScope::System,
@@ -235,13 +236,17 @@ class MachineMemOperand {
LLT getMemoryType() const { return MemoryType; }

/// Return the size in bytes of the memory reference.
uint64_t getSize() const {
return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
LocationSize getSize() const {
return MemoryType.isValid()
? LocationSize::precise(MemoryType.getSizeInBytes())
: LocationSize::beforeOrAfterPointer();
}

/// Return the size in bits of the memory reference.
uint64_t getSizeInBits() const {
return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
LocationSize getSizeInBits() const {
return MemoryType.isValid()
? LocationSize::precise(MemoryType.getSizeInBits())
: LocationSize::beforeOrAfterPointer();
}
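
These accessors no longer return a raw uint64_t, so call sites that do arithmetic with the size need a hasValue() guard first, as the .cpp changes below do. A hedged sketch of that pattern (the helper name is invented for illustration):

```cpp
#include "llvm/CodeGen/MachineMemOperand.h"
#include <cstdint>
#include <optional>

// Byte extent of the access measured from the MMO's base, or std::nullopt
// when the size is unknown or scalable.
static std::optional<int64_t> getByteExtent(const llvm::MachineMemOperand &MMO) {
  llvm::LocationSize Size = MMO.getSize();
  if (!Size.hasValue() || Size.isScalable())
    return std::nullopt;
  int64_t Bytes = Size.getValue(); // same .getValue() pattern as DFAPacketizer below
  return Bytes + MMO.getOffset();
}
```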

LLT getType() const {
4 changes: 2 additions & 2 deletions llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1299,15 +1299,15 @@ class SelectionDAG {
EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore,
uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
LocationSize Size = 0, const AAMDNodes &AAInfo = AAMDNodes());

inline SDValue getMemIntrinsicNode(
unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo,
MaybeAlign Alignment = std::nullopt,
MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore,
uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
LocationSize Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
// Ensure that codegen never sees alignment 0
return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
Alignment.value_or(getEVTAlign(MemVT)), Flags,
7 changes: 4 additions & 3 deletions llvm/lib/CodeGen/DFAPacketizer.cpp
@@ -252,12 +252,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
bool VLIWPacketizerList::alias(const MachineMemOperand &Op1,
const MachineMemOperand &Op2,
bool UseTBAA) const {
if (!Op1.getValue() || !Op2.getValue())
if (!Op1.getValue() || !Op2.getValue() || !Op1.getSize().hasValue() ||
!Op2.getSize().hasValue())
return true;

int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;
int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;

AliasResult AAResult =
AA->alias(MemoryLocation(Op1.getValue(), Overlapa,
12 changes: 7 additions & 5 deletions llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -770,12 +770,12 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
LLT RegTy = MRI.getType(LoadReg);
Register PtrReg = LoadMI->getPointerReg();
unsigned RegSize = RegTy.getSizeInBits();
uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
LocationSize LoadSizeBits = LoadMI->getMemSizeInBits();
unsigned MaskSizeBits = MaskVal.countr_one();

// The mask may not be larger than the in-memory type, as it might cover sign
// extended bits
if (MaskSizeBits > LoadSizeBits)
if (MaskSizeBits > LoadSizeBits.getValue())
return false;

// If the mask covers the whole destination register, there's nothing to
@@ -795,7 +795,8 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
// still adjust the opcode to indicate the high bit behavior.
if (LoadMI->isSimple())
MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
else if (LoadSizeBits.getValue() > MaskSizeBits ||
LoadSizeBits.getValue() == RegSize)
return false;

// TODO: Could check if it's legal with the reduced or original memory size.
@@ -860,7 +861,8 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
// If truncating more than the original extended value, abort.
auto LoadSizeBits = LoadMI->getMemSizeInBits();
if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
if (TruncSrc &&
MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits.getValue())
return false;
if (LoadSizeBits == SizeInBits)
return true;
@@ -891,7 +893,7 @@ bool CombinerHelper::matchSextInRegOfLoad(
if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
return false;

uint64_t MemBits = LoadDef->getMemSizeInBits();
uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();

// If the sign extend extends from a narrower width than the load's width,
// then we can narrow the load width when we combine to a G_SEXTLOAD.
7 changes: 4 additions & 3 deletions llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -415,7 +415,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
if (DstTy.isVector())
break;
// Everything above the retrieved bits is zero
Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits());
Known.Zero.setBitsFrom(
(*MI.memoperands_begin())->getSizeInBits().getValue());
break;
}
case TargetOpcode::G_ASHR: {
@@ -666,7 +667,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,

// e.g. i16->i32 = '17' bits known.
const MachineMemOperand *MMO = *MI.memoperands_begin();
return TyBits - MMO->getSizeInBits() + 1;
return TyBits - MMO->getSizeInBits().getValue() + 1;
}
case TargetOpcode::G_ZEXTLOAD: {
// FIXME: We need an in-memory type representation.
@@ -675,7 +676,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,

// e.g. i16->i32 = '16' bits known.
const MachineMemOperand *MMO = *MI.memoperands_begin();
return TyBits - MMO->getSizeInBits();
return TyBits - MMO->getSizeInBits().getValue();
}
case TargetOpcode::G_TRUNC: {
Register Src = MI.getOperand(1).getReg();
8 changes: 4 additions & 4 deletions llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1317,7 +1317,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (DstTy.isVector())
return UnableToLegalize;

if (8 * LoadMI.getMemSize() != DstTy.getSizeInBits()) {
if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO());
MIRBuilder.buildAnyExt(DstReg, TmpReg);
@@ -1335,7 +1335,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,

Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
auto &MMO = LoadMI.getMMO();
unsigned MemSize = MMO.getSizeInBits();
unsigned MemSize = MMO.getSizeInBits().getValue();

if (MemSize == NarrowSize) {
MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
@@ -1368,7 +1368,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (SrcTy.isVector() && LeftoverBits != 0)
return UnableToLegalize;

if (8 * StoreMI.getMemSize() != SrcTy.getSizeInBits()) {
if (8 * StoreMI.getMemSize().getValue() != SrcTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.buildTrunc(TmpReg, SrcReg);
MIRBuilder.buildStore(TmpReg, StoreMI.getPointerReg(), StoreMI.getMMO());
@@ -4456,7 +4456,7 @@ LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
LLT ValTy = MRI.getType(ValReg);

// FIXME: Do we need a distinct NarrowMemory legalize action?
if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize()) {
if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize().getValue()) {
LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
return UnableToLegalize;
}
19 changes: 5 additions & 14 deletions llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -117,12 +117,8 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
return false;

LocationSize Size1 = LdSt1->getMemSize() != MemoryLocation::UnknownSize
? LdSt1->getMemSize()
: LocationSize::beforeOrAfterPointer();
LocationSize Size2 = LdSt2->getMemSize() != MemoryLocation::UnknownSize
? LdSt2->getMemSize()
: LocationSize::beforeOrAfterPointer();
LocationSize Size1 = LdSt1->getMemSize();
LocationSize Size2 = LdSt2->getMemSize();

int64_t PtrDiff;
if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
@@ -214,14 +210,9 @@ bool GISelAddressing::instMayAlias(const MachineInstr &MI,
Offset = 0;
}

TypeSize Size = LS->getMMO().getMemoryType().getSizeInBytes();
return {LS->isVolatile(),
LS->isAtomic(),
BaseReg,
Offset /*base offset*/,
Size.isScalable() ? LocationSize::beforeOrAfterPointer()
: LocationSize::precise(Size),
&LS->getMMO()};
LocationSize Size = LS->getMMO().getSize();
return {LS->isVolatile(), LS->isAtomic(), BaseReg,
Offset /*base offset*/, Size, &LS->getMMO()};
}
// FIXME: support recognizing lifetime instructions.
// Default.
5 changes: 3 additions & 2 deletions llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1356,10 +1356,11 @@ InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
// from the stack at some point. Happily the memory operand will tell us
// the size written to the stack.
auto *MemOperand = *MI.memoperands_begin();
unsigned SizeInBits = MemOperand->getSizeInBits();
LocationSize SizeInBits = MemOperand->getSizeInBits();
assert(SizeInBits.hasValue() && "Expected to find a valid size!");

// Find that position in the stack indexes we're tracking.
auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits, 0});
auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits.getValue(), 0});
if (IdxIt == MTracker->StackSlotIdxes.end())
// That index is not tracked. This is suprising, and unlikely to ever
// occur, but the safe action is to indicate the variable is optimised out.
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -123,7 +123,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
llvm::transform(MI.uses(), std::back_inserter(MIOperands), GetHashableMO);

for (const auto *Op : MI.memoperands()) {
MIOperands.push_back((unsigned)Op->getSize());
MIOperands.push_back((unsigned)Op->getSize().getValue());
MIOperands.push_back((unsigned)Op->getFlags());
MIOperands.push_back((unsigned)Op->getOffset());
MIOperands.push_back((unsigned)Op->getSuccessOrdering());