[IR] Remove the AtomicMem*Inst helper classes #138710

Merged · 3 commits · May 6, 2025
4 changes: 0 additions & 4 deletions llvm/include/llvm/Analysis/MemoryLocation.h
@@ -30,8 +30,6 @@ class StoreInst;
 class MemTransferInst;
 class MemIntrinsic;
 class AtomicCmpXchgInst;
-class AtomicMemTransferInst;
-class AtomicMemIntrinsic;
 class AtomicRMWInst;
 class AnyMemTransferInst;
 class AnyMemIntrinsic;
@@ -253,13 +251,11 @@ class MemoryLocation {

   /// Return a location representing the source of a memory transfer.
   static MemoryLocation getForSource(const MemTransferInst *MTI);
-  static MemoryLocation getForSource(const AtomicMemTransferInst *MTI);
   static MemoryLocation getForSource(const AnyMemTransferInst *MTI);

   /// Return a location representing the destination of a memory set or
   /// transfer.
   static MemoryLocation getForDest(const MemIntrinsic *MI);
-  static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
   static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
   static std::optional<MemoryLocation> getForDest(const CallBase *CI,
                                                   const TargetLibraryInfo &TLI);
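With the Atomic* overloads gone, both the plain and the element-wise atomic intrinsics resolve to the single AnyMem* overload. A minimal caller sketch, assuming only the declarations kept above (the helper name is ours, not the PR's):

#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// One overload now serves memset/memcpy/memmove, atomic or not.
static MemoryLocation destLocation(const AnyMemIntrinsic *MI) {
  return MemoryLocation::getForDest(MI);
}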
118 changes: 24 additions & 94 deletions llvm/include/llvm/IR/IntrinsicInst.h
@@ -1107,100 +1107,6 @@ template <class BaseCL> class MemSetBase : public BaseCL {
   }
 };

-// The common base class for the atomic memset/memmove/memcpy intrinsics
-// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
-class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
-private:
-  enum { ARG_ELEMENTSIZE = 3 };
-
-public:
-  Value *getRawElementSizeInBytes() const {
-    return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
-  }
-
-  ConstantInt *getElementSizeInBytesCst() const {
-    return cast<ConstantInt>(getRawElementSizeInBytes());
-  }
-
-  uint32_t getElementSizeInBytes() const {
-    return getElementSizeInBytesCst()->getZExtValue();
-  }
-
-  void setElementSizeInBytes(Constant *V) {
-    assert(V->getType() == Type::getInt8Ty(getContext()) &&
-           "setElementSizeInBytes called with value of wrong type!");
-    setArgOperand(ARG_ELEMENTSIZE, V);
-  }
-
-  static bool classof(const IntrinsicInst *I) {
-    switch (I->getIntrinsicID()) {
-    case Intrinsic::memcpy_element_unordered_atomic:
-    case Intrinsic::memmove_element_unordered_atomic:
-    case Intrinsic::memset_element_unordered_atomic:
-      return true;
-    default:
-      return false;
-    }
-  }
-  static bool classof(const Value *V) {
-    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
-  }
-};
-
-/// This class represents atomic memset intrinsic
-// i.e. llvm.element.unordered.atomic.memset
-class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
-public:
-  static bool classof(const IntrinsicInst *I) {
-    return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
-  }
-  static bool classof(const Value *V) {
-    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
-  }
-};
-
-// This class wraps the atomic memcpy/memmove intrinsics
-// i.e. llvm.element.unordered.atomic.memcpy/memmove
-class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
-public:
-  static bool classof(const IntrinsicInst *I) {
-    switch (I->getIntrinsicID()) {
-    case Intrinsic::memcpy_element_unordered_atomic:
-    case Intrinsic::memmove_element_unordered_atomic:
-      return true;
-    default:
-      return false;
-    }
-  }
-  static bool classof(const Value *V) {
-    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
-  }
-};
-
-/// This class represents the atomic memcpy intrinsic
-/// i.e. llvm.element.unordered.atomic.memcpy
-class AtomicMemCpyInst : public AtomicMemTransferInst {
-public:
-  static bool classof(const IntrinsicInst *I) {
-    return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
-  }
-  static bool classof(const Value *V) {
-    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
-  }
-};
-
-/// This class represents the atomic memmove intrinsic
-/// i.e. llvm.element.unordered.atomic.memmove
-class AtomicMemMoveInst : public AtomicMemTransferInst {
-public:
-  static bool classof(const IntrinsicInst *I) {
-    return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
-  }
-  static bool classof(const Value *V) {
-    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
-  }
-};
-
 /// This is the common base class for memset/memcpy/memmove.
 class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
 private:
@@ -1345,6 +1251,9 @@ class MemMoveInst : public MemTransferInst {
 // i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
 // and llvm.memset/memcpy/memmove
 class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
+private:
+  enum { ARG_ELEMENTSIZE = 3 };
+
 public:
   bool isVolatile() const {
     // Only the non-atomic intrinsics can be volatile
@@ -1353,6 +1262,17 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
     return false;
   }

+  bool isAtomic() const {
+    switch (getIntrinsicID()) {
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
+      return true;
+    default:
+      return false;
+    }
+  }
+
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
     case Intrinsic::memcpy:
@@ -1371,6 +1291,16 @@ class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
   static bool classof(const Value *V) {
     return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
   }
+
+  Value *getRawElementSizeInBytes() const {
+    assert(isAtomic());
+    return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+  }
+
+  uint32_t getElementSizeInBytes() const {
+    assert(isAtomic());
+    return cast<ConstantInt>(getRawElementSizeInBytes())->getZExtValue();
+  }
 };

 /// This class represents any memset intrinsic
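For downstream code the migration pattern is mechanical: where a pass previously used dyn_cast to the removed AtomicMemIntrinsic, it now casts to AnyMemIntrinsic and tests isAtomic(). A sketch under that assumption (the helper itself is hypothetical):

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Element size of an element-wise atomic mem intrinsic, or 0 otherwise.
// Before this patch the natural spelling was dyn_cast<AtomicMemIntrinsic>.
static uint32_t atomicElementSize(const Instruction *I) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(I))
    if (MI->isAtomic())
      return MI->getElementSizeInBytes();
  return 0;
}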
6 changes: 3 additions & 3 deletions llvm/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -19,7 +19,7 @@

 namespace llvm {

-class AtomicMemCpyInst;
+class AnyMemCpyInst;
 class ConstantInt;
 class Instruction;
 class MemCpyInst;
@@ -62,10 +62,10 @@ void expandMemSetAsLoop(MemSetInst *MemSet);
 void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet);

 /// Expand \p AtomicMemCpy as a loop. \p AtomicMemCpy is not deleted.
-void expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemCpy,
+void expandAtomicMemCpyAsLoop(AnyMemCpyInst *AtomicMemCpy,
                               const TargetTransformInfo &TTI,
                               ScalarEvolution *SE);

-} // End llvm namespace
+} // namespace llvm

 #endif
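A usage sketch for the updated signature, with a hypothetical wrapper and the assumption that passing a null ScalarEvolution is acceptable here:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
using namespace llvm;

// Expand an element-wise atomic memcpy into an explicit IR loop in place.
static void lowerAtomicMemCpy(AnyMemCpyInst *MCI,
                              const TargetTransformInfo &TTI) {
  assert(MCI->isAtomic() && "plain memcpy should use expandMemCpyAsLoop");
  expandAtomicMemCpyAsLoop(MCI, TTI, /*SE=*/nullptr);
}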
8 changes: 0 additions & 8 deletions llvm/lib/Analysis/MemoryLocation.cpp
@@ -95,10 +95,6 @@ MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
   return getForSource(cast<AnyMemTransferInst>(MTI));
 }

-MemoryLocation MemoryLocation::getForSource(const AtomicMemTransferInst *MTI) {
-  return getForSource(cast<AnyMemTransferInst>(MTI));
-}
-
 MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
   assert(MTI->getRawSource() == MTI->getArgOperand(1));
   return getForArgument(MTI, 1, nullptr);
@@ -108,10 +104,6 @@ MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
   return getForDest(cast<AnyMemIntrinsic>(MI));
 }

-MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
-  return getForDest(cast<AnyMemIntrinsic>(MI));
-}
-
 MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
   assert(MI->getRawDest() == MI->getArgOperand(0));
   return getForArgument(MI, 0, nullptr);
6 changes: 3 additions & 3 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6525,7 +6525,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     return;
   }
   case Intrinsic::memcpy_element_unordered_atomic: {
-    const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
+    auto &MI = cast<AnyMemCpyInst>(I);
     SDValue Dst = getValue(MI.getRawDest());
     SDValue Src = getValue(MI.getRawSource());
     SDValue Length = getValue(MI.getLength());
@@ -6541,7 +6541,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     return;
   }
   case Intrinsic::memmove_element_unordered_atomic: {
-    auto &MI = cast<AtomicMemMoveInst>(I);
+    auto &MI = cast<AnyMemMoveInst>(I);
     SDValue Dst = getValue(MI.getRawDest());
     SDValue Src = getValue(MI.getRawSource());
     SDValue Length = getValue(MI.getLength());
@@ -6557,7 +6557,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     return;
   }
   case Intrinsic::memset_element_unordered_atomic: {
-    auto &MI = cast<AtomicMemSetInst>(I);
+    auto &MI = cast<AnyMemSetInst>(I);
     SDValue Dst = getValue(MI.getRawDest());
     SDValue Val = getValue(MI.getValue());
     SDValue Length = getValue(MI.getLength());
4 changes: 2 additions & 2 deletions llvm/lib/IR/IRBuilder.cpp
@@ -228,7 +228,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
   CallInst *CI =
       CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);

-  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
+  cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);

   // Set the TBAA info if present.
   if (TBAATag)
@@ -293,7 +293,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
       CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);

   // Set the alignment of the pointer args.
-  auto *AMCI = cast<AtomicMemCpyInst>(CI);
+  auto *AMCI = cast<AnyMemCpyInst>(CI);
   AMCI->setDestAlignment(DstAlign);
   AMCI->setSourceAlignment(SrcAlign);

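For illustration, a sketch of creating one of these calls and inspecting it through the Any* class, assuming the existing CreateElementUnorderedAtomicMemCpy signature (the wrapper and alignments are placeholders):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Emit a 4-byte-element atomic memcpy; the result is now queried via
// AnyMemCpyInst rather than the removed AtomicMemCpyInst.
static void emitAtomicCopy(IRBuilder<> &B, Value *Dst, Value *Src,
                           Value *Len) {
  CallInst *CI = B.CreateElementUnorderedAtomicMemCpy(
      Dst, Align(4), Src, Align(4), Len, /*ElementSize=*/4);
  auto *MCI = cast<AnyMemCpyInst>(CI);
  assert(MCI->isAtomic() && MCI->getElementSizeInBytes() == 4);
  (void)MCI;
}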
4 changes: 2 additions & 2 deletions llvm/lib/IR/Verifier.cpp
@@ -5617,7 +5617,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
   case Intrinsic::memcpy_element_unordered_atomic:
   case Intrinsic::memmove_element_unordered_atomic:
   case Intrinsic::memset_element_unordered_atomic: {
-    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
+    const auto *AMI = cast<AnyMemIntrinsic>(&Call);

     ConstantInt *ElementSizeCI =
         cast<ConstantInt>(AMI->getRawElementSizeInBytes());
@@ -5632,7 +5632,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
     };
     Check(IsValidAlignment(AMI->getDestAlign()),
           "incorrect alignment of the destination argument", Call);
-    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
+    if (const auto *AMT = dyn_cast<AnyMemTransferInst>(AMI)) {
       Check(IsValidAlignment(AMT->getSourceAlign()),
             "incorrect alignment of the source argument", Call);
     }
43 changes: 21 additions & 22 deletions llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -164,7 +164,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
   // introduce the unaligned memory access which will be later transformed
   // into libcall in CodeGen. This is not evident performance gain so disable
   // it now.
-  if (isa<AtomicMemTransferInst>(MI))
+  if (MI->isAtomic())
     if (*CopyDstAlign < Size || *CopySrcAlign < Size)
       return nullptr;

@@ -204,7 +204,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
     L->setVolatile(MT->isVolatile());
     S->setVolatile(MT->isVolatile());
   }
-  if (isa<AtomicMemTransferInst>(MI)) {
+  if (MI->isAtomic()) {
     // atomics have to be unordered
     L->setOrdering(AtomicOrdering::Unordered);
     S->setOrdering(AtomicOrdering::Unordered);
@@ -255,9 +255,8 @@ Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
   // introduce the unaligned memory access which will be later transformed
   // into libcall in CodeGen. This is not evident performance gain so disable
   // it now.
-  if (isa<AtomicMemSetInst>(MI))
-    if (Alignment < Len)
-      return nullptr;
+  if (MI->isAtomic() && Alignment < Len)
+    return nullptr;

   // memset(s,c,n) -> store s, c (for n=1,2,4,8)
   if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
@@ -276,7 +275,7 @@ Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
     for_each(at::getDVRAssignmentMarkers(S), replaceOpForAssignmentMarkers);

     S->setAlignment(Alignment);
-    if (isa<AtomicMemSetInst>(MI))
+    if (MI->isAtomic())
       S->setOrdering(AtomicOrdering::Unordered);

     // Set the size of the copy to 0, it will be deleted on the next iteration.
@@ -1654,27 +1653,27 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
   }

   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
-  if (!II) return visitCallBase(CI);
-
-  // For atomic unordered mem intrinsics if len is not a positive or
-  // not a multiple of element size then behavior is undefined.
-  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
-    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
-      if (NumBytes->isNegative() ||
-          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
-        CreateNonTerminatorUnreachable(AMI);
-        assert(AMI->getType()->isVoidTy() &&
-               "non void atomic unordered mem intrinsic");
-        return eraseInstFromFunction(*AMI);
-      }
+  if (!II)
+    return visitCallBase(CI);

   // Intrinsics cannot occur in an invoke or a callbr, so handle them here
   // instead of in visitCallBase.
   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
-    // memmove/cpy/set of zero bytes is a noop.
-    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
+    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(MI->getLength())) {
+      // memmove/cpy/set of zero bytes is a noop.
       if (NumBytes->isNullValue())
         return eraseInstFromFunction(CI);
+
+      // For atomic unordered mem intrinsics if len is not a positive or
+      // not a multiple of element size then behavior is undefined.
+      if (MI->isAtomic() &&
+          (NumBytes->isNegative() ||
+           (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
+        CreateNonTerminatorUnreachable(MI);
+        assert(MI->getType()->isVoidTy() &&
+               "non void atomic unordered mem intrinsic");
+        return eraseInstFromFunction(*MI);
+      }
     }

     // No other transformations apply to volatile transfers.
@@ -1719,7 +1718,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       if (GVSrc->isConstant()) {
         Module *M = CI.getModule();
         Intrinsic::ID MemCpyID =
-            isa<AtomicMemMoveInst>(MMI)
+            MMI->isAtomic()
                 ? Intrinsic::memcpy_element_unordered_atomic
                 : Intrinsic::memcpy;
         Type *Tys[3] = { CI.getArgOperand(0)->getType(),
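The relocated guard enforces the same rule as the old code: for the element-wise atomic intrinsics, a constant length that is negative or not a whole number of elements is undefined, so InstCombine replaces the call with a non-terminator unreachable. A standalone sketch of just the predicate:

#include "llvm/ADT/APInt.h"
using namespace llvm;

// True when an element-wise atomic mem intrinsic's constant length is
// invalid: negative, or not a multiple of the element size.
static bool isInvalidAtomicLength(const APInt &NumBytes,
                                  uint32_t ElementSize) {
  return NumBytes.isNegative() ||
         NumBytes.getZExtValue() % ElementSize != 0;
}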
4 changes: 2 additions & 2 deletions llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -670,10 +670,10 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
   assert(DeadSize > ToRemoveSize && "Can't remove more than original size");

   uint64_t NewSize = DeadSize - ToRemoveSize;
-  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
+  if (DeadIntrinsic->isAtomic()) {
     // When shortening an atomic memory intrinsic, the newly shortened
     // length must remain an integer multiple of the element size.
-    const uint32_t ElementSize = AMI->getElementSizeInBytes();
+    const uint32_t ElementSize = DeadIntrinsic->getElementSizeInBytes();
     if (0 != NewSize % ElementSize)
       return false;
   }
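As a worked instance of that constraint: with a 4-byte element size, shortening a 24-byte atomic copy to 16 bytes is allowed, while 18 would be rejected. A one-function sketch of the check:

// NewSize must stay a whole number of elements, e.g. 16 % 4 == 0 passes,
// 18 % 4 != 0 fails.
static bool shortenedSizeIsValid(uint64_t NewSize, uint32_t ElementSize) {
  return NewSize % ElementSize == 0;
}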
3 changes: 2 additions & 1 deletion llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -3054,7 +3054,8 @@ bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
     // non-leaf memcpy/memmove without deopt state just treat it as a leaf
     // copy and don't produce a statepoint.
     if (!AllowStatepointWithNoDeoptInfo && !Call->hasDeoptState()) {
-      assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
+      assert(isa<AnyMemTransferInst>(Call) &&
+             cast<AnyMemTransferInst>(Call)->isAtomic() &&
              "Don't expect any other calls here!");
       return false;
     }