-
Notifications
You must be signed in to change notification settings - Fork 14.2k
[IntrinsicInst] Remove MemCpyInlineInst and MemSetInlineInst [nfc] #138568
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
I'm looking for ways to simplify the Mem*Inst class structure, and these two seem to have fairly minimal justification, so let's remove them.
@llvm/pr-subscribers-llvm-analysis @llvm/pr-subscribers-llvm-selectiondag Author: Philip Reames (preames) ChangesI'm looking for ways to simplify the Mem*Inst class structure, and these two seem to have fairly minimal justification, so let's remove them. Full diff: https://github.com/llvm/llvm-project/pull/138568.diff 10 Files Affected:
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 5fc6fbfd0f28e..b4eb729c7ce38 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -207,12 +207,10 @@ class InstVisitor {
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
- RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
RetTy visitMemSetPatternInst(MemSetPatternInst &I) {
DELEGATE(IntrinsicInst);
}
RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
- RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
@@ -291,13 +289,13 @@ class InstVisitor {
case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
case Intrinsic::dbg_label: DELEGATE(DbgLabelInst);
- case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memcpy:
case Intrinsic::memcpy_inline:
- DELEGATE(MemCpyInlineInst);
+ DELEGATE(MemCpyInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
- case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::memset:
case Intrinsic::memset_inline:
- DELEGATE(MemSetInlineInst);
+ DELEGATE(MemSetInst);
case Intrinsic::experimental_memset_pattern:
DELEGATE(MemSetPatternInst);
case Intrinsic::vastart: DELEGATE(VAStartInst);
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 93750d6e3845e..48b3067266125 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1215,6 +1215,16 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
+ bool isForceInlined() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::memset_inline:
+ case Intrinsic::memcpy_inline:
+ return true;
+ default:
+ return false;
+ }
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
@@ -1251,18 +1261,6 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
}
};
-/// This class wraps the llvm.memset.inline intrinsic.
-class MemSetInlineInst : public MemSetInst {
-public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset_inline;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
/// This is the base class for llvm.experimental.memset.pattern
class MemSetPatternIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
private:
@@ -1342,18 +1340,6 @@ class MemMoveInst : public MemTransferInst {
}
};
-/// This class wraps the llvm.memcpy.inline intrinsic.
-class MemCpyInlineInst : public MemCpyInst {
-public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy_inline;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
// The common base class for any memset/memmove/memcpy intrinsics;
// whether they be atomic or non-atomic.
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index f05e36e2025d4..7e540ea907893 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -344,19 +344,13 @@ void Lint::visitCallBase(CallBase &I) {
MMI->getSourceAlign(), nullptr, MemRef::Read);
break;
}
- case Intrinsic::memset: {
+ case Intrinsic::memset:
+ case Intrinsic::memset_inline: {
MemSetInst *MSI = cast<MemSetInst>(&I);
visitMemoryReference(I, MemoryLocation::getForDest(MSI),
MSI->getDestAlign(), nullptr, MemRef::Write);
break;
}
- case Intrinsic::memset_inline: {
- MemSetInlineInst *MSII = cast<MemSetInlineInst>(&I);
- visitMemoryReference(I, MemoryLocation::getForDest(MSII),
- MSII->getDestAlign(), nullptr, MemRef::Write);
- break;
- }
-
case Intrinsic::vastart:
// vastart in non-varargs function is rejected by the verifier
visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 908524313030f..b453af1b7a641 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1734,10 +1734,6 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
DstAlign = MCI->getDestAlign().valueOrOne();
SrcAlign = MCI->getSourceAlign().valueOrOne();
CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
- } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
- DstAlign = MCI->getDestAlign().valueOrOne();
- SrcAlign = MCI->getSourceAlign().valueOrOne();
- CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
} else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
DstAlign = MMI->getDestAlign().valueOrOne();
SrcAlign = MMI->getSourceAlign().valueOrOne();
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 9dc1764b49e46..1c2912358dcb3 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -319,7 +319,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
// Only expand llvm.memcpy.inline with non-constant length in this
// codepath, leaving the current SelectionDAG expansion for constant
// length memcpy intrinsics undisturbed.
- auto *Memcpy = cast<MemCpyInlineInst>(Inst);
+ auto *Memcpy = cast<MemCpyInst>(Inst);
if (isa<ConstantInt>(Memcpy->getLength()))
break;
@@ -367,7 +367,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
// Only expand llvm.memset.inline with non-constant length in this
// codepath, leaving the current SelectionDAG expansion for constant
// length memset intrinsics undisturbed.
- auto *Memset = cast<MemSetInlineInst>(Inst);
+ auto *Memset = cast<MemSetInst>(Inst);
if (isa<ConstantInt>(Memset->getLength()))
break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 881b1536a131f..9c0d0cd663f9d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6483,7 +6483,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::memcpy_inline: {
- const auto &MCI = cast<MemCpyInlineInst>(I);
+ const auto &MCI = cast<MemCpyInst>(I);
SDValue Dst = getValue(I.getArgOperand(0));
SDValue Src = getValue(I.getArgOperand(1));
SDValue Size = getValue(I.getArgOperand(2));
@@ -6503,35 +6503,21 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
updateDAGForMaybeTailCall(MC);
return;
}
- case Intrinsic::memset: {
- const auto &MSI = cast<MemSetInst>(I);
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- SDValue Op3 = getValue(I.getArgOperand(2));
- // @llvm.memset defines 0 and 1 to both mean no alignment.
- Align Alignment = MSI.getDestAlign().valueOrOne();
- bool isVol = MSI.isVolatile();
- SDValue Root = isVol ? getRoot() : getMemoryRoot();
- SDValue MS = DAG.getMemset(
- Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
- &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
- updateDAGForMaybeTailCall(MS);
- return;
- }
+ case Intrinsic::memset:
case Intrinsic::memset_inline: {
- const auto &MSII = cast<MemSetInlineInst>(I);
+ const auto &MSII = cast<MemSetInst>(I);
SDValue Dst = getValue(I.getArgOperand(0));
SDValue Value = getValue(I.getArgOperand(1));
SDValue Size = getValue(I.getArgOperand(2));
- assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
+ assert((!MSII.isForceInlined() || isa<ConstantSDNode>(Size)) &&
+ "memset_inline needs constant size");
// @llvm.memset defines 0 and 1 to both mean no alignment.
Align DstAlign = MSII.getDestAlign().valueOrOne();
bool isVol = MSII.isVolatile();
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
- /* AlwaysInline */ true, &I,
- MachinePointerInfo(I.getArgOperand(0)),
- I.getAAMetadata());
+ SDValue MC = DAG.getMemset(
+ Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(),
+ &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
updateDAGForMaybeTailCall(MC);
return;
}
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index b448c0372eb0e..089bd997bc058 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -203,7 +203,7 @@ CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
if (DstAlign)
- cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
+ cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
// Set the TBAA info if present.
if (TBAATag)
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index b65a08be75640..0087d037f8cf2 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1142,18 +1142,18 @@ static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
if (Dest == OldV)
Dest = NewV;
- if (isa<MemCpyInlineInst>(MTI)) {
+ if (auto *MCI = dyn_cast<MemCpyInst>(MTI)) {
MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
- B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
- MTI->getSourceAlign(), MTI->getLength(),
- false, // isVolatile
- TBAA, TBAAStruct, ScopeMD, NoAliasMD);
- } else if (isa<MemCpyInst>(MTI)) {
- MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
- B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
- MTI->getLength(),
- false, // isVolatile
- TBAA, TBAAStruct, ScopeMD, NoAliasMD);
+ if (MCI->isForceInlined())
+ B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
+ MTI->getSourceAlign(), MTI->getLength(),
+ false, // isVolatile
+ TBAA, TBAAStruct, ScopeMD, NoAliasMD);
+ else
+ B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
+ MTI->getLength(),
+ false, // isVolatile
+ TBAA, TBAAStruct, ScopeMD, NoAliasMD);
} else {
assert(isa<MemMoveInst>(MTI));
B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index c9942b29a6f43..9c41c2798418c 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -780,7 +780,7 @@ bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI,
return false;
// If we're not allowed to hack on memcpy, we fail.
- if ((!HasMemcpy && !isa<MemCpyInlineInst>(MCI)) || DisableLIRP::Memcpy)
+ if ((!HasMemcpy && !MCI->isForceInlined()) || DisableLIRP::Memcpy)
return false;
Value *Dest = MCI->getDest();
@@ -1267,7 +1267,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to
// conservatively bail here, since otherwise we may have to transform
// llvm.memcpy.inline into llvm.memcpy which is illegal.
- if (isa<MemCpyInlineInst>(TheStore))
+ if (auto *MCI = dyn_cast<MemCpyInst>(TheStore); MCI && MCI->isForceInlined())
return false;
// The trip count of the loop and the base pointer of the addrec SCEV is
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index d8884d34c7bb9..9bb5eb577303a 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1212,7 +1212,7 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
// Don't convert llvm.memcpy.inline into memmove because memmove can be
// lowered as a call, and that is not allowed for llvm.memcpy.inline (and
// there is no inline version of llvm.memmove)
- if (isa<MemCpyInlineInst>(M))
+ if (auto *MCI = dyn_cast<MemCpyInst>(M); MCI && MCI->isForceInlined())
return false;
UseMemMove = true;
}
@@ -1229,17 +1229,20 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
NewM =
Builder.CreateMemMove(M->getDest(), M->getDestAlign(), CopySource,
CopySourceAlign, M->getLength(), M->isVolatile());
- else if (isa<MemCpyInlineInst>(M)) {
- // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
- // never allowed since that would allow the latter to be lowered as a call
- // to an external function.
- NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
- CopySource, CopySourceAlign,
- M->getLength(), M->isVolatile());
- } else
- NewM =
- Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
- CopySourceAlign, M->getLength(), M->isVolatile());
+ else if (auto *MCI = dyn_cast<MemCpyInst>(M)) {
+ if (MCI->isForceInlined())
+ // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
+ // never allowed since that would allow the latter to be lowered as a call
+ // to an external function.
+ NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
+ CopySource, CopySourceAlign,
+ M->getLength(), M->isVolatile());
+ else
+ NewM = Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
+ CopySourceAlign, M->getLength(),
+ M->isVolatile());
+ }
+
NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID);
assert(isa<MemoryDef>(MSSA->getMemoryAccess(M)));
|
@llvm/pr-subscribers-llvm-globalisel Author: Philip Reames (preames) ChangesI'm looking for ways to simplify the Mem*Inst class structure, and these two seem to have fairly minimal justification, so let's remove them. Full diff: https://github.com/llvm/llvm-project/pull/138568.diff 10 Files Affected:
diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index 5fc6fbfd0f28e..b4eb729c7ce38 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -207,12 +207,10 @@ class InstVisitor {
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
- RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
RetTy visitMemSetPatternInst(MemSetPatternInst &I) {
DELEGATE(IntrinsicInst);
}
RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
- RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
@@ -291,13 +289,13 @@ class InstVisitor {
case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
case Intrinsic::dbg_label: DELEGATE(DbgLabelInst);
- case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memcpy:
case Intrinsic::memcpy_inline:
- DELEGATE(MemCpyInlineInst);
+ DELEGATE(MemCpyInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
- case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::memset:
case Intrinsic::memset_inline:
- DELEGATE(MemSetInlineInst);
+ DELEGATE(MemSetInst);
case Intrinsic::experimental_memset_pattern:
DELEGATE(MemSetPatternInst);
case Intrinsic::vastart: DELEGATE(VAStartInst);
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 93750d6e3845e..48b3067266125 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1215,6 +1215,16 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
+ bool isForceInlined() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::memset_inline:
+ case Intrinsic::memcpy_inline:
+ return true;
+ default:
+ return false;
+ }
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
@@ -1251,18 +1261,6 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
}
};
-/// This class wraps the llvm.memset.inline intrinsic.
-class MemSetInlineInst : public MemSetInst {
-public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset_inline;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
/// This is the base class for llvm.experimental.memset.pattern
class MemSetPatternIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
private:
@@ -1342,18 +1340,6 @@ class MemMoveInst : public MemTransferInst {
}
};
-/// This class wraps the llvm.memcpy.inline intrinsic.
-class MemCpyInlineInst : public MemCpyInst {
-public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy_inline;
- }
- static bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
-};
-
// The common base class for any memset/memmove/memcpy intrinsics;
// whether they be atomic or non-atomic.
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index f05e36e2025d4..7e540ea907893 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -344,19 +344,13 @@ void Lint::visitCallBase(CallBase &I) {
MMI->getSourceAlign(), nullptr, MemRef::Read);
break;
}
- case Intrinsic::memset: {
+ case Intrinsic::memset:
+ case Intrinsic::memset_inline: {
MemSetInst *MSI = cast<MemSetInst>(&I);
visitMemoryReference(I, MemoryLocation::getForDest(MSI),
MSI->getDestAlign(), nullptr, MemRef::Write);
break;
}
- case Intrinsic::memset_inline: {
- MemSetInlineInst *MSII = cast<MemSetInlineInst>(&I);
- visitMemoryReference(I, MemoryLocation::getForDest(MSII),
- MSII->getDestAlign(), nullptr, MemRef::Write);
- break;
- }
-
case Intrinsic::vastart:
// vastart in non-varargs function is rejected by the verifier
visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 908524313030f..b453af1b7a641 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1734,10 +1734,6 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
DstAlign = MCI->getDestAlign().valueOrOne();
SrcAlign = MCI->getSourceAlign().valueOrOne();
CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
- } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
- DstAlign = MCI->getDestAlign().valueOrOne();
- SrcAlign = MCI->getSourceAlign().valueOrOne();
- CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
} else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
DstAlign = MMI->getDestAlign().valueOrOne();
SrcAlign = MMI->getSourceAlign().valueOrOne();
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 9dc1764b49e46..1c2912358dcb3 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -319,7 +319,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
// Only expand llvm.memcpy.inline with non-constant length in this
// codepath, leaving the current SelectionDAG expansion for constant
// length memcpy intrinsics undisturbed.
- auto *Memcpy = cast<MemCpyInlineInst>(Inst);
+ auto *Memcpy = cast<MemCpyInst>(Inst);
if (isa<ConstantInt>(Memcpy->getLength()))
break;
@@ -367,7 +367,7 @@ bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
// Only expand llvm.memset.inline with non-constant length in this
// codepath, leaving the current SelectionDAG expansion for constant
// length memset intrinsics undisturbed.
- auto *Memset = cast<MemSetInlineInst>(Inst);
+ auto *Memset = cast<MemSetInst>(Inst);
if (isa<ConstantInt>(Memset->getLength()))
break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 881b1536a131f..9c0d0cd663f9d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6483,7 +6483,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::memcpy_inline: {
- const auto &MCI = cast<MemCpyInlineInst>(I);
+ const auto &MCI = cast<MemCpyInst>(I);
SDValue Dst = getValue(I.getArgOperand(0));
SDValue Src = getValue(I.getArgOperand(1));
SDValue Size = getValue(I.getArgOperand(2));
@@ -6503,35 +6503,21 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
updateDAGForMaybeTailCall(MC);
return;
}
- case Intrinsic::memset: {
- const auto &MSI = cast<MemSetInst>(I);
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
- SDValue Op3 = getValue(I.getArgOperand(2));
- // @llvm.memset defines 0 and 1 to both mean no alignment.
- Align Alignment = MSI.getDestAlign().valueOrOne();
- bool isVol = MSI.isVolatile();
- SDValue Root = isVol ? getRoot() : getMemoryRoot();
- SDValue MS = DAG.getMemset(
- Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
- &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
- updateDAGForMaybeTailCall(MS);
- return;
- }
+ case Intrinsic::memset:
case Intrinsic::memset_inline: {
- const auto &MSII = cast<MemSetInlineInst>(I);
+ const auto &MSII = cast<MemSetInst>(I);
SDValue Dst = getValue(I.getArgOperand(0));
SDValue Value = getValue(I.getArgOperand(1));
SDValue Size = getValue(I.getArgOperand(2));
- assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
+ assert((!MSII.isForceInlined() || isa<ConstantSDNode>(Size)) &&
+ "memset_inline needs constant size");
// @llvm.memset defines 0 and 1 to both mean no alignment.
Align DstAlign = MSII.getDestAlign().valueOrOne();
bool isVol = MSII.isVolatile();
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
- /* AlwaysInline */ true, &I,
- MachinePointerInfo(I.getArgOperand(0)),
- I.getAAMetadata());
+ SDValue MC = DAG.getMemset(
+ Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(),
+ &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
updateDAGForMaybeTailCall(MC);
return;
}
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index b448c0372eb0e..089bd997bc058 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -203,7 +203,7 @@ CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
if (DstAlign)
- cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
+ cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
// Set the TBAA info if present.
if (TBAATag)
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index b65a08be75640..0087d037f8cf2 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1142,18 +1142,18 @@ static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
if (Dest == OldV)
Dest = NewV;
- if (isa<MemCpyInlineInst>(MTI)) {
+ if (auto *MCI = dyn_cast<MemCpyInst>(MTI)) {
MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
- B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
- MTI->getSourceAlign(), MTI->getLength(),
- false, // isVolatile
- TBAA, TBAAStruct, ScopeMD, NoAliasMD);
- } else if (isa<MemCpyInst>(MTI)) {
- MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
- B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
- MTI->getLength(),
- false, // isVolatile
- TBAA, TBAAStruct, ScopeMD, NoAliasMD);
+ if (MCI->isForceInlined())
+ B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
+ MTI->getSourceAlign(), MTI->getLength(),
+ false, // isVolatile
+ TBAA, TBAAStruct, ScopeMD, NoAliasMD);
+ else
+ B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
+ MTI->getLength(),
+ false, // isVolatile
+ TBAA, TBAAStruct, ScopeMD, NoAliasMD);
} else {
assert(isa<MemMoveInst>(MTI));
B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index c9942b29a6f43..9c41c2798418c 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -780,7 +780,7 @@ bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI,
return false;
// If we're not allowed to hack on memcpy, we fail.
- if ((!HasMemcpy && !isa<MemCpyInlineInst>(MCI)) || DisableLIRP::Memcpy)
+ if ((!HasMemcpy && !MCI->isForceInlined()) || DisableLIRP::Memcpy)
return false;
Value *Dest = MCI->getDest();
@@ -1267,7 +1267,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to
// conservatively bail here, since otherwise we may have to transform
// llvm.memcpy.inline into llvm.memcpy which is illegal.
- if (isa<MemCpyInlineInst>(TheStore))
+ if (auto *MCI = dyn_cast<MemCpyInst>(TheStore); MCI && MCI->isForceInlined())
return false;
// The trip count of the loop and the base pointer of the addrec SCEV is
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index d8884d34c7bb9..9bb5eb577303a 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1212,7 +1212,7 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
// Don't convert llvm.memcpy.inline into memmove because memmove can be
// lowered as a call, and that is not allowed for llvm.memcpy.inline (and
// there is no inline version of llvm.memmove)
- if (isa<MemCpyInlineInst>(M))
+ if (auto *MCI = dyn_cast<MemCpyInst>(M); MCI && MCI->isForceInlined())
return false;
UseMemMove = true;
}
@@ -1229,17 +1229,20 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
NewM =
Builder.CreateMemMove(M->getDest(), M->getDestAlign(), CopySource,
CopySourceAlign, M->getLength(), M->isVolatile());
- else if (isa<MemCpyInlineInst>(M)) {
- // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
- // never allowed since that would allow the latter to be lowered as a call
- // to an external function.
- NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
- CopySource, CopySourceAlign,
- M->getLength(), M->isVolatile());
- } else
- NewM =
- Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
- CopySourceAlign, M->getLength(), M->isVolatile());
+ else if (auto *MCI = dyn_cast<MemCpyInst>(M)) {
+ if (MCI->isForceInlined())
+ // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
+ // never allowed since that would allow the latter to be lowered as a call
+ // to an external function.
+ NewM = Builder.CreateMemCpyInline(M->getDest(), M->getDestAlign(),
+ CopySource, CopySourceAlign,
+ M->getLength(), M->isVolatile());
+ else
+ NewM = Builder.CreateMemCpy(M->getDest(), M->getDestAlign(), CopySource,
+ CopySourceAlign, M->getLength(),
+ M->isVolatile());
+ }
+
NewM->copyMetadata(*M, LLVMContext::MD_DIAssignID);
assert(isa<MemoryDef>(MSSA->getMemoryAccess(M)));
|
You can test this locally with the following command: git-clang-format --diff HEAD~1 HEAD --extensions cpp,h -- llvm/include/llvm/IR/InstVisitor.h llvm/include/llvm/IR/IntrinsicInst.h llvm/lib/Analysis/Lint.cpp llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp llvm/lib/IR/IRBuilder.cpp llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp View the diff from clang-format here.diff --git a/llvm/include/llvm/IR/InstVisitor.h b/llvm/include/llvm/IR/InstVisitor.h
index b4eb729c7..437f2ad41 100644
--- a/llvm/include/llvm/IR/InstVisitor.h
+++ b/llvm/include/llvm/IR/InstVisitor.h
@@ -206,11 +206,11 @@ public:
{ DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
- RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemSetPatternInst(MemSetPatternInst &I) {
DELEGATE(IntrinsicInst);
}
- RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 744a0fa57..cbe91e22d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6516,8 +6516,8 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isVol = MSII.isVolatile();
SDValue Root = isVol ? getRoot() : getMemoryRoot();
SDValue MC = DAG.getMemset(
- Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(),
- &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
+ Root, sdl, Dst, Value, Size, DstAlign, isVol, MSII.isForceInlined(), &I,
+ MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
updateDAGForMaybeTailCall(MC);
return;
}
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
Upstream change: llvm/llvm-project#138568
Upstream change: llvm/llvm-project#138568
Migrate their usage to the AnyMem*Inst family, and add a isAtomic() query on the base class for that hierarchy. This matches the idioms we use for e.g. isAtomic on load, store, etc.. instructions, the existing isVolatile idioms on mem* routines, and allows us to more easily share code between atomic and non-atomic variants. As with llvm#138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough".
Migrate their usage to the `AnyMem*Inst` family, and add a isAtomic() query on the base class for that hierarchy. This matches the idioms we use for e.g. isAtomic on load, store, etc.. instructions, the existing isVolatile idioms on mem* routines, and allows us to more easily share code between atomic and non-atomic variants. As with #138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough". Longer term, I'd sorta like to merge or reverse the naming on the plain Mem*Inst and the AnyMem*Inst, but that's a much larger and more risky change. Not sure I'm going to actually do that.
(llvm#138568) I'm looking for ways to simplify the Mem*Inst class structure, and these two seem to have fairly minimal justification, so let's remove them.
Migrate their usage to the `AnyMem*Inst` family, and add a isAtomic() query on the base class for that hierarchy. This matches the idioms we use for e.g. isAtomic on load, store, etc.. instructions, the existing isVolatile idioms on mem* routines, and allows us to more easily share code between atomic and non-atomic variants. As with llvm#138568, the goal here is to simplify the class hierarchy and make it easier to reason about. I'm moving from easiest to hardest, and will stop at some point when I hit "good enough". Longer term, I'd sorta like to merge or reverse the naming on the plain Mem*Inst and the AnyMem*Inst, but that's a much larger and more risky change. Not sure I'm going to actually do that.
I'm looking for ways to simplify the Mem*Inst class structure, and these two seem to have fairly minimal justification, so let's remove them.