Commit 279fa8e

[Alignment][NFC] Deprecate untyped CreateAlignedLoad
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: arsenm, jvesely, nhaehnle, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73260

1 parent 1e0174a commit 279fa8e

21 files changed (+98, -89 lines)
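A minimal caller-side sketch of the migration this commit nudges users toward. The function and variable names below are hypothetical, not from the patch: the deprecated overloads take a raw unsigned, while the preferred overloads take Align/MaybeAlign from llvm/Support/Alignment.h.

#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical call site: load a 32-bit counter with a known 4-byte alignment.
static Value *loadCounter(IRBuilder<> &Builder, Value *Ptr) {
  Type *I32 = Builder.getInt32Ty();
  // Before this commit (now deprecated): alignment passed as a plain unsigned.
  //   return Builder.CreateAlignedLoad(I32, Ptr, 4, "ctr");
  // After: wrap the value in Align (or MaybeAlign when 0 means "unspecified"),
  // so only valid power-of-two alignments can be expressed.
  return Builder.CreateAlignedLoad(I32, Ptr, Align(4), "ctr");
}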

llvm/include/llvm/IR/IRBuilder.h

Lines changed: 16 additions & 9 deletions
@@ -1759,8 +1759,10 @@ class IRBuilder : public IRBuilderBase, public Inserter {
   /// parameter.
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
-                              const char *Name) {
+  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+                                                        unsigned Align,
+                                                        const char *Name),
+                            "Use the version that takes MaybeAlign instead") {
     return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1771,8 +1773,10 @@ class IRBuilder : public IRBuilderBase, public Inserter {
   }
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
-                              const Twine &Name = "") {
+  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+                                                        unsigned Align,
+                                                        const Twine &Name = ""),
+                            "Use the version that takes MaybeAlign instead") {
     return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1783,8 +1787,11 @@ class IRBuilder : public IRBuilderBase, public Inserter {
   }
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
-                              bool isVolatile, const Twine &Name = "") {
+  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+                                                        unsigned Align,
+                                                        bool isVolatile,
+                                                        const Twine &Name = ""),
+                            "Use the version that takes MaybeAlign instead") {
     return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1797,19 +1804,19 @@ class IRBuilder : public IRBuilderBase, public Inserter {
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
     return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
-                             Align, Name);
+                             MaybeAlign(Align), Name);
   }
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
                               const Twine &Name = "") {
     return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
-                             Align, Name);
+                             MaybeAlign(Align), Name);
   }
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
                               const Twine &Name = "") {
     return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
-                             Align, isVolatile, Name);
+                             MaybeAlign(Align), isVolatile, Name);
   }
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
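The compiler-warning side of this change comes from LLVM_ATTRIBUTE_DEPRECATED (llvm/Support/Compiler.h), which attaches a deprecation attribute to the wrapped declaration. Below is a rough, self-contained sketch of the mechanism; DEMO_DEPRECATED, DemoAlign, and DemoBuilder are made-up stand-ins, not the real macro or classes.

// Stand-in for LLVM_ATTRIBUTE_DEPRECATED from llvm/Support/Compiler.h; the
// real macro also covers MSVC's __declspec(deprecated).
#if defined(__GNUC__) || defined(__clang__)
#define DEMO_DEPRECATED(decl, message) decl __attribute__((deprecated(message)))
#else
#define DEMO_DEPRECATED(decl, message) decl
#endif

struct DemoAlign { unsigned Value; }; // stand-in for llvm::Align

struct DemoBuilder {
  // Old-style overload: still compiles, but every use warns
  // (-Wdeprecated-declarations) and points callers at the replacement.
  DEMO_DEPRECATED(void createAlignedLoad(unsigned Alignment),
                  "Use the version that takes DemoAlign instead");
  // New-style overload.
  void createAlignedLoad(DemoAlign /*Alignment*/) {}
};

int main() {
  DemoBuilder B;
  B.createAlignedLoad(DemoAlign{16}); // fine
  // B.createAlignedLoad(16u);        // would trigger the deprecation warning
  return 0;
}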

llvm/include/llvm/IR/Instructions.h

Lines changed: 5 additions & 1 deletion
@@ -109,8 +109,12 @@ class AllocaInst : public UnaryInstruction {

   /// Return the alignment of the memory that is being allocated by the
   /// instruction.
+  MaybeAlign getAlign() const {
+    return decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
+  }
+  // FIXME: Remove this once transition to Align is over.
   unsigned getAlignment() const {
-    if (const auto MA = decodeMaybeAlign(getSubclassDataFromInstruction() & 31))
+    if (const auto MA = getAlign())
       return MA->value();
     return 0;
   }
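getAlign() decodes the alignment that AllocaInst keeps packed in its subclass data. A small sketch of the decodeMaybeAlign/encode round-trip from llvm/Support/Alignment.h, assuming its usual log2-plus-one encoding where 0 means "no alignment recorded":

#include "llvm/Support/Alignment.h"
#include <cassert>

using namespace llvm;

int main() {
  // 0 decodes to "no alignment"; any other value V decodes to 1 << (V - 1).
  MaybeAlign Unset = decodeMaybeAlign(0);
  MaybeAlign Sixteen = decodeMaybeAlign(5); // 1 << (5 - 1) == 16
  assert(!Unset);
  assert(Sixteen && Sixteen->value() == 16);
  // encode() is the inverse, which is what the setters ultimately store.
  assert(encode(Sixteen) == 5);
  return 0;
}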

llvm/lib/CodeGen/AtomicExpandPass.cpp

Lines changed: 4 additions & 4 deletions
@@ -1622,7 +1622,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
   bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
   Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

-  unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);
+  const llvm::Align AllocaAlignment(DL.getPrefTypeAlignment(SizedIntTy));

   // TODO: the "order" argument type is "int", not int32. So
   // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
@@ -1712,7 +1712,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
   // 'expected' argument, if present.
   if (CASExpected) {
     AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
-    AllocaCASExpected->setAlignment(MaybeAlign(AllocaAlignment));
+    AllocaCASExpected->setAlignment(AllocaAlignment);
     unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();

     AllocaCASExpected_i8 =
@@ -1731,7 +1731,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
     Args.push_back(IntValue);
   } else {
     AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
-    AllocaValue->setAlignment(MaybeAlign(AllocaAlignment));
+    AllocaValue->setAlignment(AllocaAlignment);
     AllocaValue_i8 =
         Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
     Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
@@ -1743,7 +1743,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
   // 'ret' argument.
   if (!CASExpected && HasResult && !UseSizedLibcall) {
     AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
-    AllocaResult->setAlignment(MaybeAlign(AllocaAlignment));
+    AllocaResult->setAlignment(AllocaAlignment);
     unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
     AllocaResult_i8 =
         Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
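The pattern adopted above, computing the preferred alignment once as an llvm::Align and letting it convert implicitly where MaybeAlign is expected, can be sketched in isolation like this; the helper name and its use are illustrative only.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Illustrative helper: allocate a temporary at the type's preferred alignment.
static AllocaInst *createAlignedTemp(IRBuilder<> &Builder, Type *Ty,
                                     const DataLayout &DL) {
  // getPrefTypeAlignment() still returns unsigned; wrap it exactly once.
  const Align PrefAlign(DL.getPrefTypeAlignment(Ty));
  AllocaInst *Tmp = Builder.CreateAlloca(Ty);
  // Align converts to the MaybeAlign parameter of setAlignment(), so no
  // MaybeAlign(...) wrapper is needed at each call site.
  Tmp->setAlignment(PrefAlign);
  return Tmp;
}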

llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp

Lines changed: 1 addition & 1 deletion
@@ -1220,7 +1220,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
                                        "interleaved.wide.ptrcast");

   // Create the wide load and update the MemorySSA.
-  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
+  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlign(),
                                       "interleaved.wide.load");
   auto MSSAU = MemorySSAUpdater(&MSSA);
   MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(

llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ static bool lowerLoadRelative(Function &F) {
     Value *OffsetPtr =
         B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1));
     Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy);
-    Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, 4);
+    Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, Align(4));

     Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32);

llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp

Lines changed: 12 additions & 11 deletions
@@ -130,7 +130,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);

-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
   VectorType *VecType = cast<VectorType>(CI->getType());

   Type *EltTy = VecType->getElementType();
@@ -151,7 +151,8 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
   }

   // Adjust alignment for the scalar instruction.
-  AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
+  const Align AdjustedAlignVal =
+      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
   // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
@@ -166,7 +167,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
       Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
+      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
       VResult = Builder.CreateInsertElement(VResult, Load, Idx);
     }
     CI->replaceAllUsesWith(VResult);
@@ -210,7 +211,7 @@ static void scalarizeMaskedLoad(CallInst *CI, bool &ModifiedDT) {
     Builder.SetInsertPoint(InsertPt);

     Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

    // Create "else" block, fill it in the next iteration
@@ -414,8 +415,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-      LoadInst *Load =
-          Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
+      LoadInst *Load = Builder.CreateAlignedLoad(
+          EltTy, Ptr, MaybeAlign(AlignVal), "Load" + Twine(Idx));
      VResult =
          Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
    }
@@ -459,8 +460,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
    Builder.SetInsertPoint(InsertPt);

    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-    LoadInst *Load =
-        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, MaybeAlign(AlignVal),
+                                               "Load" + Twine(Idx));
    Value *NewVResult =
        Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));

@@ -624,8 +625,8 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-      LoadInst *Load =
-          Builder.CreateAlignedLoad(EltTy, NewPtr, 1, "Load" + Twine(Idx));
+      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
+                                                 "Load" + Twine(Idx));
      VResult =
          Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
      ++MemIndex;
@@ -670,7 +671,7 @@ static void scalarizeMaskedExpandLoad(CallInst *CI, bool &ModifiedDT) {
                                     "cond.load");
    Builder.SetInsertPoint(InsertPt);

-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, 1);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

    // Move the pointer if there are more blocks to come.
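commonAlignment() replaces the old MinAlign() arithmetic when a vector load's alignment is narrowed to a per-element alignment. A tiny sketch of the expected behaviour, assuming commonAlignment(A, Offset) returns the largest alignment guaranteed at both A and the given byte offset:

#include "llvm/Support/Alignment.h"
#include <cassert>

using namespace llvm;

int main() {
  // A 4-byte element carved out of a 16-byte-aligned vector is 4-aligned.
  assert(commonAlignment(Align(16), /*Offset=*/4) == Align(4));
  // If the whole vector is only 2-aligned, the element cannot claim more.
  assert(commonAlignment(Align(2), /*Offset=*/4) == Align(2));
  return 0;
}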

llvm/lib/IR/AutoUpgrade.cpp

Lines changed: 3 additions & 2 deletions
@@ -2308,7 +2308,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
       Type *VT = VectorType::get(EltTy, NumSrcElts);
       Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
                                             PointerType::getUnqual(VT));
-      Value *Load = Builder.CreateAlignedLoad(VT, Op, 1);
+      Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
       if (NumSrcElts == 2)
         Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
                                           { 0, 1, 0, 1 });
@@ -3054,7 +3054,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
       // Convert the type of the pointer to a pointer to the stored type.
       Value *BC =
           Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
-      LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8);
+      LoadInst *LI =
+          Builder.CreateAlignedLoad(VTy, BC, Align(VTy->getBitWidth() / 8));
       LI->setMetadata(M->getMDKindID("nontemporal"), Node);
       Rep = LI;
     } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||

llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp

Lines changed: 1 addition & 1 deletion
@@ -160,7 +160,7 @@ bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
       ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
                                      ArgPtr->getName() + ".cast");
     LoadInst *Load =
-        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign.value());
+        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
     Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

     MDBuilder MDB(Ctx);

llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

Lines changed: 2 additions & 2 deletions
@@ -251,10 +251,10 @@ AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
   // 32-bit and extract sequence is already present, and it is probably easier
   // to CSE this. The loads should be mergable later anyway.
   Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
-  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);
+  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

   Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
-  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);
+  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

   MDNode *MD = MDNode::get(Mod->getContext(), None);
   LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);

llvm/lib/Target/ARM/ARMParallelDSP.cpp

Lines changed: 1 addition & 2 deletions
@@ -772,8 +772,7 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
   const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
   Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                     LoadTy->getPointerTo(AddrSpace));
-  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
-                                             Base->getAlignment());
+  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());

   // Make sure everything is in the correct order in the basic block.
   MoveBefore(Base->getPointerOperand(), VecPtr);

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 1 addition & 1 deletion
@@ -27474,7 +27474,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
   // Finally we can emit the atomic load.
   LoadInst *Loaded =
       Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
-                                AI->getType()->getPrimitiveSizeInBits());
+                                Align(AI->getType()->getPrimitiveSizeInBits()));
   Loaded->setAtomic(Order, SSID);
   AI->replaceAllUsesWith(Loaded);
   AI->eraseFromParent();

llvm/lib/Target/X86/X86InterleavedAccess.cpp

Lines changed: 1 addition & 1 deletion
@@ -216,7 +216,7 @@ void X86InterleavedAccessGroup::decompose(
     Value *NewBasePtr =
         Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
-        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlignment());
+        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlign());
     DecomposedVectors.push_back(NewLoad);
   }
 }

llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp

Lines changed: 6 additions & 5 deletions
@@ -1056,7 +1056,8 @@ static Value *simplifyX86vpermv(const IntrinsicInst &II,
 // * Narrow width by halfs excluding zero/undef lanes
 Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
   Value *LoadPtr = II.getArgOperand(0);
-  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
+  const Align Alignment =
+      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

   // If the mask is all ones or undefs, this is a plain vector load of the 1st
   // argument.
@@ -1066,9 +1067,9 @@ Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {

   // If we can unconditionally load from this address, replace with a
   // load/select idiom. TODO: use DT for context sensitive query
-  if (isDereferenceableAndAlignedPointer(
-          LoadPtr, II.getType(), MaybeAlign(Alignment),
-          II.getModule()->getDataLayout(), &II, nullptr)) {
+  if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
+                                         II.getModule()->getDataLayout(), &II,
+                                         nullptr)) {
     Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                           "unmaskedload");
     return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
@@ -1459,7 +1460,7 @@ static Value *simplifyNeonVld1(const IntrinsicInst &II,

   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                           PointerType::get(II.getType(), 0));
-  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
+  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
 }

 // Returns true iff the 2 intrinsics have the same operands, limiting the
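The masked-load intrinsic carries its alignment as an i32 constant operand; getAlignValue() (used above) turns that ConstantInt straight into an llvm::Align instead of detouring through getZExtValue() and unsigned. A hedged sketch of the call-site shape, with the helper name purely illustrative:

#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Illustrative only: pull the alignment operand of a masked-load style call.
static Align maskedLoadAlignment(const IntrinsicInst &II) {
  // Operand 1 is the alignment immediate of llvm.masked.load in this era.
  // Before: unsigned A = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
  // After:  the ConstantInt is interpreted directly as an alignment.
  return cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
}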

llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp

Lines changed: 10 additions & 14 deletions
@@ -462,12 +462,11 @@ LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
          NewPtr->getType()->getPointerAddressSpace() == AS))
     NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

-  unsigned Align = LI.getAlignment();
-  if (!Align)
   // If old load did not have an explicit alignment specified,
   // manually preserve the implied (ABI) alignment of the load.
   // Else we may inadvertently incorrectly over-promise alignment.
-    Align = getDataLayout().getABITypeAlignment(LI.getType());
+  const auto Align =
+      getDataLayout().getValueOrABITypeAlignment(LI.getAlign(), LI.getType());

   LoadInst *NewLoad = Builder.CreateAlignedLoad(
       NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
@@ -674,9 +673,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
     if (SL->hasPadding())
       return nullptr;

-    auto Align = LI.getAlignment();
-    if (!Align)
-      Align = DL.getABITypeAlignment(ST);
+    const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), ST);

     auto *Addr = LI.getPointerOperand();
     auto *IdxType = Type::getInt32Ty(T->getContext());
@@ -690,9 +687,9 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
       };
       auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
-      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
-      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
-                                             EltAlign, Name + ".unpack");
+      auto *L = IC.Builder.CreateAlignedLoad(
+          ST->getElementType(i), Ptr,
+          commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
       // Propagate AA metadata. It'll still be valid on the narrowed load.
       AAMDNodes AAMD;
       LI.getAAMetadata(AAMD);
@@ -725,9 +722,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {

     const DataLayout &DL = IC.getDataLayout();
     auto EltSize = DL.getTypeAllocSize(ET);
-    auto Align = LI.getAlignment();
-    if (!Align)
-      Align = DL.getABITypeAlignment(T);
+    const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), T);

     auto *Addr = LI.getPointerOperand();
     auto *IdxType = Type::getInt64Ty(T->getContext());
@@ -742,8 +737,9 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
       };
       auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
-      auto *L = IC.Builder.CreateAlignedLoad(
-          AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
+      auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
+                                             commonAlignment(Align, Offset),
+                                             Name + ".unpack");
       AAMDNodes AAMD;
       LI.getAAMetadata(AAMD);
       L->setAAMetadata(AAMD);
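The repeated "use the explicit alignment, otherwise fall back to the ABI alignment" dance collapses into DataLayout::getValueOrABITypeAlignment(). A sketch of the equivalence; the helper name alignmentOrABI is purely illustrative and assumes the transitional semantics of that DataLayout method.

#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Illustrative re-statement of what the hunks above now delegate to
// DL.getValueOrABITypeAlignment(LI.getAlign(), Ty).
static Align alignmentOrABI(MaybeAlign Explicit, Type *Ty,
                            const DataLayout &DL) {
  // Old pattern:
  //   unsigned A = LI.getAlignment();
  //   if (!A)
  //     A = DL.getABITypeAlignment(Ty);
  if (Explicit)
    return *Explicit;
  return Align(DL.getABITypeAlignment(Ty));
}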
