Commit e1e131e
Clean up usages of asserting vector getters in Type
Summary:
Remove usages of asserting vector getters in Type in preparation for the
VectorType refactor. The existence of these functions complicates the
refactor while adding little value.

Reviewers: grosbach, efriedma, sdesmalen

Reviewed By: efriedma

Subscribers: hiraditya, dmgreen, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77271
1 parent bd1ccfe commit e1e131e
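
The change is mechanical across all three files: calls to Type's asserting vector getters (getVectorElementType(), getVectorNumElements()) are replaced with the equivalent getters on VectorType, either because the value is already typed as VectorType * or via an explicit cast<VectorType>/dyn_cast<VectorType> at the call site. A minimal sketch of the before/after shape, assuming the LLVM 10-era API; the helper names are illustrative, not from the commit:

    #include "llvm/IR/DerivedTypes.h" // VectorType
    #include "llvm/Support/Casting.h" // cast
    using namespace llvm;

    // Before: Type exposes vector getters that assert at runtime when the
    // type is not actually a vector, hiding the precondition from the reader.
    unsigned lanesBefore(Type *Ty) {
      return Ty->getVectorNumElements(); // asserts internally if !isVectorTy()
    }

    // After: the caller states the precondition explicitly with cast<>
    // (or tests it with dyn_cast<> when the type may legitimately be scalar).
    unsigned lanesAfter(Type *Ty) {
      return cast<VectorType>(Ty)->getNumElements();
    }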

File tree: 3 files changed, +46 −48 lines changed

llvm/lib/Target/ARM/ARMISelLowering.cpp

Lines changed: 22 additions & 25 deletions
@@ -17757,7 +17757,7 @@ bool ARMTargetLowering::lowerInterleavedLoad(
          "Unmatched number of shufflevectors and indices");
 
   VectorType *VecTy = Shuffles[0]->getType();
-  Type *EltTy = VecTy->getVectorElementType();
+  Type *EltTy = VecTy->getElementType();
 
   const DataLayout &DL = LI->getModule()->getDataLayout();
 
@@ -17772,8 +17772,7 @@ bool ARMTargetLowering::lowerInterleavedLoad(
   // A pointer vector can not be the return type of the ldN intrinsics. Need to
   // load integer vectors first and then convert to pointer vectors.
   if (EltTy->isPointerTy())
-    VecTy =
-        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
+    VecTy = VectorType::get(DL.getIntPtrType(EltTy), VecTy->getNumElements());
 
   IRBuilder<> Builder(LI);
 
@@ -17783,15 +17782,15 @@ bool ARMTargetLowering::lowerInterleavedLoad(
   if (NumLoads > 1) {
     // If we're going to generate more than one load, reset the sub-vector type
     // to something legal.
-    VecTy = VectorType::get(VecTy->getVectorElementType(),
-                            VecTy->getVectorNumElements() / NumLoads);
+    VecTy = VectorType::get(VecTy->getElementType(),
+                            VecTy->getNumElements() / NumLoads);
 
     // We will compute the pointer operand of each load from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
     // element type.
     BaseAddr = Builder.CreateBitCast(
-        BaseAddr, VecTy->getVectorElementType()->getPointerTo(
-                      LI->getPointerAddressSpace()));
+        BaseAddr,
+        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
   }
 
   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
@@ -17816,8 +17815,8 @@ bool ARMTargetLowering::lowerInterleavedLoad(
            "expected interleave factor of 2 or 4 for MVE");
     Intrinsic::ID LoadInts =
         Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
-    Type *VecEltTy = VecTy->getVectorElementType()->getPointerTo(
-        LI->getPointerAddressSpace());
+    Type *VecEltTy =
+        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
     Type *Tys[] = {VecTy, VecEltTy};
     Function *VldnFunc =
         Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
@@ -17837,9 +17836,8 @@ bool ARMTargetLowering::lowerInterleavedLoad(
     // If we're generating more than one load, compute the base address of
     // subsequent loads as an offset from the previous.
     if (LoadCount > 0)
-      BaseAddr =
-          Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
-                                     VecTy->getVectorNumElements() * Factor);
+      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
+                                            VecTy->getNumElements() * Factor);
 
     CallInst *VldN = createLoadIntrinsic(BaseAddr);
 
@@ -17854,8 +17852,8 @@ bool ARMTargetLowering::lowerInterleavedLoad(
       // Convert the integer vector to pointer vector if the element is pointer.
       if (EltTy->isPointerTy())
         SubVec = Builder.CreateIntToPtr(
-            SubVec, VectorType::get(SV->getType()->getVectorElementType(),
-                                    VecTy->getVectorNumElements()));
+            SubVec, VectorType::get(SV->getType()->getElementType(),
+                                    VecTy->getNumElements()));
 
       SubVecs[SV].push_back(SubVec);
     }
@@ -17908,11 +17906,10 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
          "Invalid interleave factor");
 
   VectorType *VecTy = SVI->getType();
-  assert(VecTy->getVectorNumElements() % Factor == 0 &&
-         "Invalid interleaved store");
+  assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
 
-  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
-  Type *EltTy = VecTy->getVectorElementType();
+  unsigned LaneLen = VecTy->getNumElements() / Factor;
+  Type *EltTy = VecTy->getElementType();
   VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
 
   const DataLayout &DL = SI->getModule()->getDataLayout();
@@ -17935,8 +17932,8 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
     Type *IntTy = DL.getIntPtrType(EltTy);
 
     // Convert to the corresponding integer vector.
-    Type *IntVecTy =
-        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
+    Type *IntVecTy = VectorType::get(
+        IntTy, cast<VectorType>(Op0->getType())->getNumElements());
     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
 
@@ -17950,14 +17947,14 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
     // If we're going to generate more than one store, reset the lane length
     // and sub-vector type to something legal.
     LaneLen /= NumStores;
-    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);
+    SubVecTy = VectorType::get(SubVecTy->getElementType(), LaneLen);
 
     // We will compute the pointer operand of each store from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
     // element type.
     BaseAddr = Builder.CreateBitCast(
-        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
-                      SI->getPointerAddressSpace()));
+        BaseAddr,
+        SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
   }
 
   assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
@@ -17987,7 +17984,7 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
            "expected interleave factor of 2 or 4 for MVE");
     Intrinsic::ID StoreInts =
         Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
-    Type *EltPtrTy = SubVecTy->getVectorElementType()->getPointerTo(
+    Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
         SI->getPointerAddressSpace());
     Type *Tys[] = {EltPtrTy, SubVecTy};
     Function *VstNFunc =
@@ -18009,7 +18006,7 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
     // If we generating more than one store, we compute the base address of
     // subsequent stores as an offset from the previous.
     if (StoreCount > 0)
-      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
+      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                             BaseAddr, LaneLen * Factor);
 
     SmallVector<Value *, 4> Shuffles;
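
In this file VecTy and SubVecTy are already declared as VectorType *, so the commit can use the class's own getters directly, with no cast. A small standalone sketch of that direct usage, assuming the LLVM 10-era VectorType API (the demo function is hypothetical, not from the commit):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    // Hypothetical demo: when the variable is statically a VectorType *, the
    // member getters replace the old Type-level asserting ones one-for-one.
    void demo(LLVMContext &Ctx) {
      VectorType *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 4); // <4 x i32>
      Type *EltTy = VecTy->getElementType();    // was VecTy->getVectorElementType()
      unsigned Lanes = VecTy->getNumElements(); // was VecTy->getVectorNumElements()
      (void)EltTy;
      (void)Lanes;
    }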

llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp

Lines changed: 7 additions & 7 deletions
@@ -434,7 +434,7 @@ int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
        Opcode == Instruction::ExtractElement)) {
     // Cross-class copies are expensive on many microarchitectures,
     // so assume they are expensive by default.
-    if (ValTy->getVectorElementType()->isIntegerTy())
+    if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
       return 3;
 
     // Even if it's not a cross class copy, this likely leads to mixing
@@ -452,7 +452,7 @@ int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
     // result anyway.
     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
                     ST->getMVEVectorCostFactor()) *
-           ValTy->getVectorNumElements() / 2;
+           cast<VectorType>(ValTy)->getNumElements() / 2;
   }
 
   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
@@ -794,8 +794,8 @@ int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
     return LT.first * BaseCost;
 
   // Else this is expand, assume that we need to scalarize this op.
-  if (Ty->isVectorTy()) {
-    unsigned Num = Ty->getVectorNumElements();
+  if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+    unsigned Num = VTy->getNumElements();
     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
     // Return the cost of multiple scalar invocation plus the cost of
     // inserting and extracting the values.
@@ -812,7 +812,7 @@ int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
 
   if (ST->hasNEON() && Src->isVectorTy() &&
       (Alignment && *Alignment != Align(16)) &&
-      Src->getVectorElementType()->isDoubleTy()) {
+      cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
     // Unaligned loads/stores are extremely inefficient.
     // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
     return LT.first * 4;
@@ -835,7 +835,7 @@ int ARMTTIImpl::getInterleavedMemoryOpCost(
 
   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
       !UseMaskForCond && !UseMaskForGaps) {
-    unsigned NumElts = VecTy->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(VecTy)->getNumElements();
     auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
 
     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
@@ -1403,7 +1403,7 @@ bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
   case Instruction::ICmp:
   case Instruction::Add:
     return ScalarBits < 64 &&
-           (ScalarBits * Ty->getVectorNumElements()) % 128 == 0;
+           (ScalarBits * cast<VectorType>(Ty)->getNumElements()) % 128 == 0;
   default:
     llvm_unreachable("Unhandled reduction opcode");
   }
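
The TTI hooks receive a bare Type *, so here the commit inserts explicit casts. Where the old code paired an isVectorTy() check with an asserting getter (as in getArithmeticInstrCost above), dyn_cast<VectorType> folds the test and the conversion into one step. A sketch of that idiom under the same API assumptions (the helper is hypothetical):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    // Hypothetical helper mirroring the getArithmeticInstrCost change: guard
    // and query in one step instead of isVectorTy() + getVectorNumElements().
    unsigned scalarizationFactor(Type *Ty) {
      if (auto *VTy = dyn_cast<VectorType>(Ty))
        return VTy->getNumElements(); // vector: one scalar op per lane
      return 1;                       // scalar: a single op
    }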

llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp

Lines changed: 17 additions & 16 deletions
@@ -157,8 +157,8 @@ Value *MVEGatherScatterLowering::checkGEP(Value *&Offsets, Type *Ty, Value *Ptr,
   }
   Offsets = GEP->getOperand(1);
   // Paranoid check whether the number of parallel lanes is the same
-  assert(Ty->getVectorNumElements() ==
-         Offsets->getType()->getVectorNumElements());
+  assert(cast<VectorType>(Ty)->getNumElements() ==
+         cast<VectorType>(Offsets->getType())->getNumElements());
   // Only <N x i32> offsets can be integrated into an arm gather, any smaller
   // type would have to be sign extended by the gep - and arm gathers can only
   // zero extend. Additionally, the offsets do have to originate from a zext of
@@ -168,7 +168,7 @@ Value *MVEGatherScatterLowering::checkGEP(Value *&Offsets, Type *Ty, Value *Ptr,
     return nullptr;
   if (ZExtInst *ZextOffs = dyn_cast<ZExtInst>(Offsets))
     Offsets = ZextOffs->getOperand(0);
-  else if (!(Offsets->getType()->getVectorNumElements() == 4 &&
+  else if (!(cast<VectorType>(Offsets->getType())->getNumElements() == 4 &&
              Offsets->getType()->getScalarSizeInBits() == 32))
     return nullptr;
 
@@ -191,9 +191,9 @@ Value *MVEGatherScatterLowering::checkGEP(Value *&Offsets, Type *Ty, Value *Ptr,
 void MVEGatherScatterLowering::lookThroughBitcast(Value *&Ptr) {
   // Look through bitcast instruction if #elements is the same
   if (auto *BitCast = dyn_cast<BitCastInst>(Ptr)) {
-    Type *BCTy = BitCast->getType();
-    Type *BCSrcTy = BitCast->getOperand(0)->getType();
-    if (BCTy->getVectorNumElements() == BCSrcTy->getVectorNumElements()) {
+    auto *BCTy = cast<VectorType>(BitCast->getType());
+    auto *BCSrcTy = cast<VectorType>(BitCast->getOperand(0)->getType());
+    if (BCTy->getNumElements() == BCSrcTy->getNumElements()) {
       LLVM_DEBUG(
           dbgs() << "masked gathers/scatters: looking through bitcast\n");
       Ptr = BitCast->getOperand(0);
@@ -223,14 +223,14 @@ Value *MVEGatherScatterLowering::lowerGather(IntrinsicInst *I) {
   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
   // Attempt to turn the masked gather in I into a MVE intrinsic
   // Potentially optimising the addressing modes as we do so.
-  Type *Ty = I->getType();
+  auto *Ty = cast<VectorType>(I->getType());
   Value *Ptr = I->getArgOperand(0);
   unsigned Alignment = cast<ConstantInt>(I->getArgOperand(1))->getZExtValue();
   Value *Mask = I->getArgOperand(2);
   Value *PassThru = I->getArgOperand(3);
 
-  if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
-                               Ty->getScalarSizeInBits(), Alignment))
+  if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(),
+                               Alignment))
     return nullptr;
   lookThroughBitcast(Ptr);
   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
@@ -267,9 +267,9 @@ Value *MVEGatherScatterLowering::tryCreateMaskedGatherBase(IntrinsicInst *I,
                                                            Value *Ptr,
                                                            IRBuilder<> &Builder) {
   using namespace PatternMatch;
-  Type *Ty = I->getType();
+  auto *Ty = cast<VectorType>(I->getType());
   LLVM_DEBUG(dbgs() << "masked gathers: loading from vector of pointers\n");
-  if (Ty->getVectorNumElements() != 4 || Ty->getScalarSizeInBits() != 32)
+  if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32)
     // Can't build an intrinsic for this
     return nullptr;
   Value *Mask = I->getArgOperand(2);
@@ -357,11 +357,12 @@ Value *MVEGatherScatterLowering::lowerScatter(IntrinsicInst *I) {
   Value *Input = I->getArgOperand(0);
   Value *Ptr = I->getArgOperand(1);
   unsigned Alignment = cast<ConstantInt>(I->getArgOperand(2))->getZExtValue();
-  Type *Ty = Input->getType();
+  auto *Ty = cast<VectorType>(Input->getType());
 
-  if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
-                               Ty->getScalarSizeInBits(), Alignment))
+  if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(),
+                               Alignment))
     return nullptr;
+
   lookThroughBitcast(Ptr);
   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
 
@@ -386,9 +387,9 @@ Value *MVEGatherScatterLowering::tryCreateMaskedScatterBase(
   using namespace PatternMatch;
   Value *Input = I->getArgOperand(0);
   Value *Mask = I->getArgOperand(3);
-  Type *Ty = Input->getType();
+  auto *Ty = cast<VectorType>(Input->getType());
   // Only QR variants allow truncating
-  if (!(Ty->getVectorNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) {
+  if (!(Ty->getNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) {
     // Can't build an intrinsic for this
     return nullptr;
   }
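
This file queries the same type several times per function, so the commit hoists one cast into a local (auto *Ty = cast<VectorType>(...)) and then uses plain member calls. A sketch of that hoisted-cast idiom, again under the LLVM 10-era API; the helper is hypothetical, though the lane/width check mirrors the MVE conditions above:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IntrinsicInst.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    // Hypothetical helper: cast once, then query the VectorType directly.
    // The MVE gather/scatter base intrinsics handle exactly four 32-bit lanes.
    static bool hasGatherableShape(IntrinsicInst *I) {
      auto *Ty = cast<VectorType>(I->getType()); // masked.gather results are vectors
      return Ty->getNumElements() == 4 && Ty->getScalarSizeInBits() == 32;
    }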
