[LLVM] Make more use of IRBuilder::CreateIntrinsic. NFC. #112706
Conversation
Convert many instances of:

Fn = Intrinsic::getOrInsertDeclaration(...);
CreateCall(Fn, ...)

to the equivalent CreateIntrinsic call.
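As a rough sketch of the pattern being replaced (the names M, Ty, Op0, Op1, and Name below are illustrative placeholders, not drawn from any particular hunk in this patch):

// Before: two steps, with an explicit Module and declaration.
Function *Fn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::umin, {Ty});
Value *V = Builder.CreateCall(Fn, {Op0, Op1}, Name);

// After: a single CreateIntrinsic call; the declaration is
// materialized internally by the builder.
Value *V = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {Op0, Op1},
                                   /*FMFSource=*/nullptr, Name);

Note that /*FMFSource=*/nullptr only needs to be spelled out when a trailing Name is passed, since the FMFSource parameter precedes Name in this overload; that is why it appears in some hunks below and not others.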
@llvm/pr-subscribers-llvm-transforms @llvm/pr-subscribers-backend-arm

Author: Jay Foad (jayfoad)

Changes

Convert many instances of:

Fn = Intrinsic::getOrInsertDeclaration(...);
CreateCall(Fn, ...)

to the equivalent CreateIntrinsic call.

Patch is 94.86 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/112706.diff

43 Files Affected:
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 32ba3e91822ddb..dd18b524e3f9c1 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -235,13 +235,12 @@ Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
// TODO add caching
// Scalable vector %evl conversion.
if (ElemCount.isScalable()) {
- auto *M = Builder.GetInsertBlock()->getModule();
Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
- Function *ActiveMaskFunc = Intrinsic::getOrInsertDeclaration(
- M, Intrinsic::get_active_lane_mask, {BoolVecTy, EVLParam->getType()});
// `get_active_lane_mask` performs an implicit less-than comparison.
Value *ConstZero = Builder.getInt32(0);
- return Builder.CreateCall(ActiveMaskFunc, {ConstZero, EVLParam});
+ return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
+ {BoolVecTy, EVLParam->getType()},
+ {ConstZero, EVLParam});
}
// Fixed vector %evl conversion.
@@ -299,18 +298,18 @@ Value *CachingVPExpander::expandPredicationToIntCall(
case Intrinsic::umin: {
Value *Op0 = VPI.getOperand(0);
Value *Op1 = VPI.getOperand(1);
- Function *Fn = Intrinsic::getOrInsertDeclaration(
- VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
- Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
+ Value *NewOp = Builder.CreateIntrinsic(
+ UnpredicatedIntrinsicID, {VPI.getType()}, {Op0, Op1},
+ /*FMFSource=*/nullptr, VPI.getName());
replaceOperation(*NewOp, VPI);
return NewOp;
}
case Intrinsic::bswap:
case Intrinsic::bitreverse: {
Value *Op = VPI.getOperand(0);
- Function *Fn = Intrinsic::getOrInsertDeclaration(
- VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
- Value *NewOp = Builder.CreateCall(Fn, {Op}, VPI.getName());
+ Value *NewOp =
+ Builder.CreateIntrinsic(UnpredicatedIntrinsicID, {VPI.getType()}, {Op},
+ /*FMFSource=*/nullptr, VPI.getName());
replaceOperation(*NewOp, VPI);
return NewOp;
}
@@ -327,9 +326,9 @@ Value *CachingVPExpander::expandPredicationToFPCall(
case Intrinsic::fabs:
case Intrinsic::sqrt: {
Value *Op0 = VPI.getOperand(0);
- Function *Fn = Intrinsic::getOrInsertDeclaration(
- VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
- Value *NewOp = Builder.CreateCall(Fn, {Op0}, VPI.getName());
+ Value *NewOp =
+ Builder.CreateIntrinsic(UnpredicatedIntrinsicID, {VPI.getType()}, {Op0},
+ /*FMFSource=*/nullptr, VPI.getName());
replaceOperation(*NewOp, VPI);
return NewOp;
}
@@ -337,9 +336,9 @@ Value *CachingVPExpander::expandPredicationToFPCall(
case Intrinsic::minnum: {
Value *Op0 = VPI.getOperand(0);
Value *Op1 = VPI.getOperand(1);
- Function *Fn = Intrinsic::getOrInsertDeclaration(
- VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
- Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
+ Value *NewOp = Builder.CreateIntrinsic(
+ UnpredicatedIntrinsicID, {VPI.getType()}, {Op0, Op1},
+ /*FMFSource=*/nullptr, VPI.getName());
replaceOperation(*NewOp, VPI);
return NewOp;
}
@@ -592,12 +591,10 @@ bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
if (StaticElemCount.isScalable()) {
// TODO add caching
- auto *M = VPI.getModule();
- Function *VScaleFunc =
- Intrinsic::getOrInsertDeclaration(M, Intrinsic::vscale, Int32Ty);
IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
- Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
+ Value *VScale = Builder.CreateIntrinsic(Intrinsic::vscale, Int32Ty, {},
+ /*FMFSource=*/nullptr, "vscale");
MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
/*NUW*/ true, /*NSW*/ false);
} else {
diff --git a/llvm/lib/CodeGen/HardwareLoops.cpp b/llvm/lib/CodeGen/HardwareLoops.cpp
index c8a63304a3b63b..86fec239c3edb2 100644
--- a/llvm/lib/CodeGen/HardwareLoops.cpp
+++ b/llvm/lib/CodeGen/HardwareLoops.cpp
@@ -512,8 +512,7 @@ Value* HardwareLoop::InsertIterationSetup(Value *LoopCountInit) {
: Intrinsic::test_set_loop_iterations)
: (UsePhi ? Intrinsic::start_loop_iterations
: Intrinsic::set_loop_iterations);
- Function *LoopIter = Intrinsic::getOrInsertDeclaration(M, ID, Ty);
- Value *LoopSetup = Builder.CreateCall(LoopIter, LoopCountInit);
+ Value *LoopSetup = Builder.CreateIntrinsic(ID, Ty, LoopCountInit);
// Use the return value of the intrinsic to control the entry of the loop.
if (UseLoopGuard) {
@@ -541,10 +540,9 @@ void HardwareLoop::InsertLoopDec() {
Attribute::StrictFP))
CondBuilder.setIsFPConstrained(true);
- Function *DecFunc = Intrinsic::getOrInsertDeclaration(
- M, Intrinsic::loop_decrement, LoopDecrement->getType());
Value *Ops[] = { LoopDecrement };
- Value *NewCond = CondBuilder.CreateCall(DecFunc, Ops);
+ Value *NewCond = CondBuilder.CreateIntrinsic(Intrinsic::loop_decrement,
+ LoopDecrement->getType(), Ops);
Value *OldCond = ExitBranch->getCondition();
ExitBranch->setCondition(NewCond);
@@ -565,10 +563,9 @@ Instruction* HardwareLoop::InsertLoopRegDec(Value *EltsRem) {
Attribute::StrictFP))
CondBuilder.setIsFPConstrained(true);
- Function *DecFunc = Intrinsic::getOrInsertDeclaration(
- M, Intrinsic::loop_decrement_reg, {EltsRem->getType()});
Value *Ops[] = { EltsRem, LoopDecrement };
- Value *Call = CondBuilder.CreateCall(DecFunc, Ops);
+ Value *Call = CondBuilder.CreateIntrinsic(Intrinsic::loop_decrement_reg,
+ {EltsRem->getType()}, Ops);
LLVM_DEBUG(dbgs() << "HWLoops: Inserted loop dec: " << *Call << "\n");
return cast<Instruction>(Call);
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 519ff8d74c5af4..3aceb5227bb389 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1757,8 +1757,7 @@ static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
Type *Ty = CI.getType();
Value *Op0 = CI.getOperand(0);
Value *Op1 = CI.getOperand(1);
- Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
- Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
+ Value *Res = Builder.CreateIntrinsic(IID, Ty, {Op0, Op1});
if (CI.arg_size() == 4) { // For masked intrinsics.
Value *VecSrc = CI.getOperand(2);
@@ -1784,8 +1783,7 @@ static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
}
Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
- Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
- Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
+ Value *Res = Builder.CreateIntrinsic(IID, Ty, {Src, Src, Amt});
if (CI.arg_size() == 4) { // For masked intrinsics.
Value *VecSrc = CI.getOperand(2);
@@ -1854,8 +1852,7 @@ static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
}
Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
- Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
- Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
+ Value *Res = Builder.CreateIntrinsic(IID, Ty, {Op0, Op1, Amt});
unsigned NumArgs = CI.arg_size();
if (NumArgs >= 4) { // For masked intrinsics.
@@ -1915,9 +1912,8 @@ static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
Type *Ty = CI.getType();
Value *Op0 = CI.getArgOperand(0);
- Function *F =
- Intrinsic::getOrInsertDeclaration(CI.getModule(), Intrinsic::abs, Ty);
- Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
+ Value *Res = Builder.CreateIntrinsic(Intrinsic::abs, Ty,
+ {Op0, Builder.getInt1(false)});
if (CI.arg_size() == 3)
Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
return Res;
@@ -2009,9 +2005,8 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
Intrinsic::ID IID) {
- Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID);
- Value *Rep = Builder.CreateCall(Intrin,
- { CI.getArgOperand(0), CI.getArgOperand(1) });
+ Value *Rep = Builder.CreateIntrinsic(
+ IID, {}, {CI.getArgOperand(0), CI.getArgOperand(1)});
return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}
@@ -2480,9 +2475,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
} else if (Name == "sse.sqrt.ss" || Name == "sse2.sqrt.sd") {
Value *Vec = CI->getArgOperand(0);
Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
- Function *Intr = Intrinsic::getOrInsertDeclaration(
- F->getParent(), Intrinsic::sqrt, Elt0->getType());
- Elt0 = Builder.CreateCall(Intr, Elt0);
+ Elt0 = Builder.CreateIntrinsic(Intrinsic::sqrt, Elt0->getType(), Elt0);
Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
} else if (Name.starts_with("avx.sqrt.p") ||
Name.starts_with("sse2.sqrt.p") ||
@@ -2770,9 +2763,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
: Intrinsic::x86_avx512_sitofp_round;
- Function *F = Intrinsic::getOrInsertDeclaration(CI->getModule(), IID,
- {DstTy, SrcTy});
- Rep = Builder.CreateCall(F, {Rep, CI->getArgOperand(3)});
+ Rep = Builder.CreateIntrinsic(IID, {DstTy, SrcTy},
+ {Rep, CI->getArgOperand(3)});
} else {
Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
: Builder.CreateSIToFP(Rep, DstTy, "cvt");
@@ -2813,9 +2805,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
ResultTy->getNumElements());
- Function *ELd = Intrinsic::getOrInsertDeclaration(
- F->getParent(), Intrinsic::masked_expandload, ResultTy);
- Rep = Builder.CreateCall(ELd, {Ptr, MaskVec, CI->getOperand(1)});
+ Rep = Builder.CreateIntrinsic(Intrinsic::masked_expandload, ResultTy,
+ {Ptr, MaskVec, CI->getOperand(1)});
} else if (Name.starts_with("avx512.mask.compress.store.")) {
auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
Type *PtrTy = ResultTy->getElementType();
@@ -2828,9 +2819,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
getX86MaskVec(Builder, CI->getArgOperand(2),
cast<FixedVectorType>(ResultTy)->getNumElements());
- Function *CSt = Intrinsic::getOrInsertDeclaration(
- F->getParent(), Intrinsic::masked_compressstore, ResultTy);
- Rep = Builder.CreateCall(CSt, {CI->getArgOperand(1), Ptr, MaskVec});
+ Rep = Builder.CreateIntrinsic(Intrinsic::masked_compressstore, ResultTy,
+ {CI->getArgOperand(1), Ptr, MaskVec});
} else if (Name.starts_with("avx512.mask.compress.") ||
Name.starts_with("avx512.mask.expand.")) {
auto *ResultTy = cast<FixedVectorType>(CI->getType());
@@ -2841,10 +2831,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
bool IsCompress = Name[12] == 'c';
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
: Intrinsic::x86_avx512_mask_expand;
- Function *Intr =
- Intrinsic::getOrInsertDeclaration(F->getParent(), IID, ResultTy);
- Rep = Builder.CreateCall(Intr,
- {CI->getOperand(0), CI->getOperand(1), MaskVec});
+ Rep = Builder.CreateIntrinsic(
+ IID, ResultTy, {CI->getOperand(0), CI->getOperand(1), MaskVec});
} else if (Name.starts_with("xop.vpcom")) {
bool IsSigned;
if (Name.ends_with("ub") || Name.ends_with("uw") || Name.ends_with("ud") ||
@@ -2905,11 +2893,10 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
bool ZeroMask = Name[11] == 'z';
Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
} else if (Name == "sse42.crc32.64.8") {
- Function *CRC32 = Intrinsic::getOrInsertDeclaration(
- F->getParent(), Intrinsic::x86_sse42_crc32_32_8);
Value *Trunc0 =
Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
- Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
+ Rep = Builder.CreateIntrinsic(Intrinsic::x86_sse42_crc32_32_8, {},
+ {Trunc0, CI->getArgOperand(1)});
Rep = Builder.CreateZExt(Rep, CI->getType(), "");
} else if (Name.starts_with("avx.vbroadcast.s") ||
Name.starts_with("avx512.vbroadcast.s")) {
@@ -3769,12 +3756,9 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IID = Intrinsic::x86_avx512_vfmadd_f64;
else
IID = Intrinsic::x86_avx512_vfmadd_f32;
- Function *FMA = Intrinsic::getOrInsertDeclaration(CI->getModule(), IID);
- Rep = Builder.CreateCall(FMA, Ops);
+ Rep = Builder.CreateIntrinsic(IID, {}, Ops);
} else {
- Function *FMA = Intrinsic::getOrInsertDeclaration(
- CI->getModule(), Intrinsic::fma, A->getType());
- Rep = Builder.CreateCall(FMA, {A, B, C});
+ Rep = Builder.CreateIntrinsic(Intrinsic::fma, A->getType(), {A, B, C});
}
Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType())
@@ -3827,9 +3811,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Rep = Builder.CreateIntrinsic(IID, {}, {A, B, C, CI->getArgOperand(4)});
} else {
- Function *FMA = Intrinsic::getOrInsertDeclaration(
- CI->getModule(), Intrinsic::fma, A->getType());
- Rep = Builder.CreateCall(FMA, {A, B, C});
+ Rep = Builder.CreateIntrinsic(Intrinsic::fma, A->getType(), {A, B, C});
}
Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType())
@@ -4088,8 +4070,8 @@ static Value *upgradeAArch64IntrinsicCall(StringRef Name, CallBase *CI,
Args[1] = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
GoodPredTy, Args[1]);
- Function *NewF = Intrinsic::getOrInsertDeclaration(CI->getModule(), NewID);
- return Builder.CreateCall(NewF, Args, CI->getName());
+ return Builder.CreateIntrinsic(NewID, {}, Args, /*FMFSource=*/nullptr,
+ CI->getName());
}
static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
@@ -4171,8 +4153,8 @@ static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Ops.push_back(Op);
}
- Function *Fn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
- return Builder.CreateCall(Fn, Ops, CI->getName());
+ return Builder.CreateIntrinsic(ID, Tys, Ops, /*FMFSource=*/nullptr,
+ CI->getName());
}
llvm_unreachable("Unknown function for ARM CallBase upgrade.");
}
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 3654bf9a9e70b5..f340f7aafdc76f 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -90,10 +90,8 @@ Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
if (cast<ConstantInt>(Scaling)->isZero())
return Scaling;
- Module *M = GetInsertBlock()->getParent()->getParent();
- Function *TheFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::vscale,
- {Scaling->getType()});
- CallInst *CI = CreateCall(TheFn, {}, {}, Name);
+ CallInst *CI =
+ CreateIntrinsic(Intrinsic::vscale, {Scaling->getType()}, {}, {}, Name);
return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
}
@@ -140,12 +138,9 @@ CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
MDNode *TBAATag, MDNode *ScopeTag,
MDNode *NoAliasTag) {
Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
- Type *Tys[] = { Ptr->getType(), Size->getType() };
- Module *M = BB->getParent()->getParent();
- Function *TheFn =
- Intrinsic::getOrInsertDeclaration(M, Intrinsic::memset, Tys);
+ Type *Tys[] = {Ptr->getType(), Size->getType()};
- CallInst *CI = CreateCall(TheFn, Ops);
+ CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);
if (Align)
cast<MemSetInst>(CI)->setDestAlignment(*Align);
@@ -170,11 +165,8 @@ CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
MDNode *NoAliasTag) {
Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
Type *Tys[] = {Dst->getType(), Size->getType()};
- Module *M = BB->getParent()->getParent();
- Function *TheFn =
- Intrinsic::getOrInsertDeclaration(M, Intrinsic::memset_inline, Tys);
- CallInst *CI = CreateCall(TheFn, Ops);
+ CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
if (DstAlign)
cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
@@ -198,11 +190,9 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
Type *Tys[] = {Ptr->getType(), Size->getType()};
- Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getOrInsertDeclaration(
- M, Intrinsic::memset_element_unordered_atomic, Tys);
- CallInst *CI = CreateCall(TheFn, Ops);
+ CallInst *CI =
+ CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);
cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
@@ -227,11 +217,9 @@ CallInst *IRBuilderBase::CreateMemTransferInst(
IntrID == Intrinsic::memmove) &&
"Unexpected intrinsic ID");
Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
- Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
- Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getOrInsertDeclaration(M, IntrID, Tys);
+ Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
- CallInst *CI = CreateCall(TheFn, Ops);
+ CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);
auto* MCI = cast<MemTransferInst>(CI);
if (DstAlign)
@@ -266,11 +254,9 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
"Pointer alignment must be at least element size");
Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
- Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getOrInsertDeclaration(
- M, Intrinsic::memcpy_element_unordered_atomic, Tys);
- CallInst *CI = CreateCall(TheFn, Ops);
+ CallInst *CI =
+ CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);
// Set the alignment of the pointer args.
auto *AMCI = cast<AtomicMemCpyInst>(CI);
@@ -382,11 +368,9 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
"Pointer alignment must be at least element size");
Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getTyp...
[truncated]
LGTM
Some of these could use CreateUnaryIntrinsic or CreateBinaryIntrinsic to avoid specifying the overload type, but this is an improvement in any case...
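For instance, a sketch of the reviewer's suggestion, using the sqrt and umin cases above as stand-ins (these are separate switch cases in the original code; VPI and the operands are as in those hunks):

// CreateUnaryIntrinsic deduces the overload type from the operand,
// so {VPI.getType()} need not be spelled out.
Value *NewOp = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, Op0,
                                            /*FMFSource=*/nullptr,
                                            VPI.getName());

// Likewise for two-operand intrinsics such as umin, where the
// overload type is deduced from the LHS operand.
Value *NewMin = Builder.CreateBinaryIntrinsic(Intrinsic::umin, Op0, Op1,
                                              /*FMFSource=*/nullptr,
                                              VPI.getName());

This only applies to intrinsics whose sole overloaded type matches an operand type; the explicit-type CreateIntrinsic form used in the patch works in every case.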
Convert many instances of:
Fn = Intrinsic::getOrInsertDeclaration(...);
CreateCall(Fn, ...)
to the equivalent CreateIntrinsic call.