
Commit 9ab5474

[LV] Rename ToVectorTy to toVectorTy (NFC) (#120404)
This is for consistency with other helpers (and also follows the LLVM naming conventions).
1 parent 93d4b1f commit 9ab5474


3 files changed: +51 -51 lines changed


llvm/include/llvm/IR/VectorTypeUtils.h

Lines changed: 5 additions & 5 deletions
@@ -16,14 +16,14 @@ namespace llvm {
 /// A helper function for converting Scalar types to vector types. If
 /// the incoming type is void, we return void. If the EC represents a
 /// scalar, we return the scalar type.
-inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
+inline Type *toVectorTy(Type *Scalar, ElementCount EC) {
   if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
     return Scalar;
   return VectorType::get(Scalar, EC);
 }
 
-inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
-  return ToVectorTy(Scalar, ElementCount::getFixed(VF));
+inline Type *toVectorTy(Type *Scalar, unsigned VF) {
+  return toVectorTy(Scalar, ElementCount::getFixed(VF));
 }
 
 /// A helper for converting structs of scalar types to structs of vector types.
@@ -41,7 +41,7 @@ Type *toScalarizedStructTy(StructType *StructTy);
 bool isVectorizedStructTy(StructType *StructTy);
 
 /// A helper for converting to vectorized types. For scalar types, this is
-/// equivalent to calling `ToVectorTy`. For struct types, this returns a new
+/// equivalent to calling `toVectorTy`. For struct types, this returns a new
 /// struct where each element type has been widened to a vector type.
 /// Note:
 /// - If the incoming type is void, we return void
@@ -50,7 +50,7 @@ bool isVectorizedStructTy(StructType *StructTy);
 inline Type *toVectorizedTy(Type *Ty, ElementCount EC) {
   if (StructType *StructTy = dyn_cast<StructType>(Ty))
     return toVectorizedStructTy(StructTy, EC);
-  return ToVectorTy(Ty, EC);
+  return toVectorTy(Ty, EC);
 }
 
 /// A helper for converting vectorized types to scalarized (non-vector) types.
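
Not part of this commit, but as a quick orientation for the renamed helper: the sketch below exercises both toVectorTy overloads under the behaviour described in the doc comment above (void and scalar element counts pass straight through, everything else is widened to a vector type). The includes and the demo function name are illustrative only.

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/VectorTypeUtils.h"

using namespace llvm;

// Hypothetical demo, not in the tree.
static void demoToVectorTy(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);

  // Fixed VF of 4: i32 becomes <4 x i32> (unsigned overload).
  Type *V4I32 = toVectorTy(I32, /*VF=*/4);

  // Scalable element count: i32 becomes <vscale x 2 x i32>.
  Type *NxV2I32 = toVectorTy(I32, ElementCount::getScalable(2));

  // A scalar element count and void are returned unchanged.
  Type *SameI32 = toVectorTy(I32, ElementCount::getFixed(1));
  Type *StillVoid = toVectorTy(Type::getVoidTy(Ctx), /*VF=*/4);

  (void)V4I32;
  (void)NxV2I32;
  (void)SameI32;
  (void)StillVoid;
}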

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 29 additions & 29 deletions
@@ -1251,8 +1251,8 @@ class LoopVectorizationCostModel {
       return false;
 
     // Get the source and destination types of the truncate.
-    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
-    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
+    Type *SrcTy = toVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
+    Type *DestTy = toVectorTy(cast<CastInst>(I)->getDestTy(), VF);
 
     // If the truncate is free for the given types, return false. Replacing a
     // free truncate with an induction variable would add an induction variable
@@ -3535,14 +3535,14 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
   }
   InstructionCost SafeDivisorCost = 0;
 
-  auto *VecTy = ToVectorTy(I->getType(), VF);
+  auto *VecTy = toVectorTy(I->getType(), VF);
 
   // The cost of the select guard to ensure all lanes are well defined
   // after we speculate above any internal control flow.
-  SafeDivisorCost += TTI.getCmpSelInstrCost(
-      Instruction::Select, VecTy,
-      ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
-      CmpInst::BAD_ICMP_PREDICATE, CostKind);
+  SafeDivisorCost +=
+      TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
+                             toVectorTy(Type::getInt1Ty(I->getContext()), VF),
+                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
 
   // Certain instructions can be cheaper to vectorize if they have a constant
   // second vector operand. One example of this are shifts on x86.
@@ -4662,7 +4662,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
   }
 
   auto WillWiden = [&TTI, VF](Type *ScalarTy) {
-    Type *VectorTy = ToVectorTy(ScalarTy, VF);
+    Type *VectorTy = toVectorTy(ScalarTy, VF);
     unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
     if (!NumLegalParts)
       return false;
@@ -5653,7 +5653,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
       ScalarCost += TTI.getScalarizationOverhead(
-          cast<VectorType>(ToVectorTy(I->getType(), VF)),
+          cast<VectorType>(toVectorTy(I->getType(), VF)),
           APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ true,
           /*Extract*/ false, CostKind);
       ScalarCost +=
@@ -5672,7 +5672,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
           Worklist.push_back(J);
         else if (needsExtract(J, VF)) {
           ScalarCost += TTI.getScalarizationOverhead(
-              cast<VectorType>(ToVectorTy(J->getType(), VF)),
+              cast<VectorType>(toVectorTy(J->getType(), VF)),
               APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
               /*Extract*/ true, CostKind);
         }
@@ -5783,7 +5783,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
 
   unsigned AS = getLoadStoreAddressSpace(I);
   Value *Ptr = getLoadStorePointerOperand(I);
-  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
+  Type *PtrTy = toVectorTy(Ptr->getType(), VF);
   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
   // that it is being called from this specific place.
 
@@ -5834,7 +5834,7 @@ InstructionCost
 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                     ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   Value *Ptr = getLoadStorePointerOperand(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
@@ -5866,7 +5866,7 @@ LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
   assert(Legal->isUniformMemOp(*I, VF));
 
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
@@ -5892,7 +5892,7 @@ InstructionCost
 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                  ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   const Value *Ptr = getLoadStorePointerOperand(I);
 
@@ -5910,7 +5910,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
 
   Instruction *InsertPos = Group->getInsertPos();
   Type *ValTy = getLoadStoreType(InsertPos);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -6155,7 +6155,7 @@ InstructionCost LoopVectorizationCostModel::getScalarizationOverhead(
     return 0;
 
   InstructionCost Cost = 0;
-  Type *RetTy = ToVectorTy(I->getType(), VF);
+  Type *RetTy = toVectorTy(I->getType(), VF);
   if (!RetTy->isVoidTy() &&
       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
     Cost += TTI.getScalarizationOverhead(
@@ -6421,9 +6421,9 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
 
     bool MaskRequired = Legal->isMaskRequired(CI);
     // Compute corresponding vector type for return value and arguments.
-    Type *RetTy = ToVectorTy(ScalarRetTy, VF);
+    Type *RetTy = toVectorTy(ScalarRetTy, VF);
     for (Type *ScalarTy : ScalarTys)
-      Tys.push_back(ToVectorTy(ScalarTy, VF));
+      Tys.push_back(toVectorTy(ScalarTy, VF));
 
     // An in-loop reduction using an fmuladd intrinsic is a special case;
     // we don't want the normal cost for that intrinsic.
@@ -6613,7 +6613,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
            HasSingleCopyAfterVectorization(I, VF));
     VectorTy = RetTy;
   } else
-    VectorTy = ToVectorTy(RetTy, VF);
+    VectorTy = toVectorTy(RetTy, VF);
 
   if (VF.isVector() && VectorTy->isVectorTy() &&
       !TTI.getNumberOfParts(VectorTy))
@@ -6673,8 +6673,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     return Switch->getNumCases() *
            TTI.getCmpSelInstrCost(
                Instruction::ICmp,
-               ToVectorTy(Switch->getCondition()->getType(), VF),
-               ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
+               toVectorTy(Switch->getCondition()->getType(), VF),
+               toVectorTy(Type::getInt1Ty(I->getContext()), VF),
                CmpInst::ICMP_EQ, CostKind);
   }
   case Instruction::PHI: {
@@ -6719,8 +6719,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     }
     return (Phi->getNumIncomingValues() - 1) *
            TTI.getCmpSelInstrCost(
-               Instruction::Select, ToVectorTy(ResultTy, VF),
-               ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
+               Instruction::Select, toVectorTy(ResultTy, VF),
+               toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                CmpInst::BAD_ICMP_PREDICATE, CostKind);
   }
 
@@ -6729,8 +6729,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     if (VF.isVector() && foldTailWithEVL() &&
         Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
       IntrinsicCostAttributes ICA(
-          Intrinsic::vp_merge, ToVectorTy(Phi->getType(), VF),
-          {ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
+          Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
+          {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
       return TTI.getIntrinsicInstrCost(ICA, CostKind);
     }
 
@@ -6870,7 +6870,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
     }
 
-    VectorTy = ToVectorTy(ValTy, VF);
+    VectorTy = toVectorTy(ValTy, VF);
     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                   cast<CmpInst>(I)->getPredicate(), CostKind,
                                   {TTI::OK_AnyValue, TTI::OP_None},
@@ -6888,7 +6888,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       if (Decision == CM_Scalarize)
         Width = ElementCount::getFixed(1);
     }
-    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
+    VectorTy = toVectorTy(getLoadStoreType(I), Width);
     return getMemoryInstructionCost(I, VF);
   }
   case Instruction::BitCast:
@@ -6969,7 +6969,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       SrcScalarTy =
          IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
     Type *SrcVecTy =
-        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
+        VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
       // If the result type is <= the source type, there will be no extend
@@ -7498,7 +7498,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
   // Pre-compute the cost for I, if it has a reduction pattern cost.
   for (Instruction *I : ChainOpsAndOperands) {
     auto ReductionCost = CM.getReductionPatternCost(
-        I, VF, ToVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
+        I, VF, toVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
     if (!ReductionCost)
       continue;
 
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 17 additions & 17 deletions
@@ -1031,11 +1031,11 @@ InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
     Arguments.push_back(V);
   }
 
-  Type *RetTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *RetTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
   SmallVector<Type *> ParamTys;
   for (unsigned I = 0; I != getNumOperands(); ++I)
     ParamTys.push_back(
-        ToVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
+        toVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
 
   // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
   FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
@@ -1203,7 +1203,7 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
   SelectInst *SI = cast<SelectInst>(getUnderlyingValue());
   bool ScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
   Type *ScalarTy = Ctx.Types.inferScalarType(this);
-  Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
   VPValue *Op0, *Op1;
@@ -1384,7 +1384,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   switch (Opcode) {
   case Instruction::FNeg: {
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     return Ctx.TTI.getArithmeticInstrCost(
         Opcode, VectorTy, CostKind,
         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
@@ -1422,7 +1422,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
     if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
         getOperand(1)->isDefinedOutsideLoopRegions())
       RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
 
     SmallVector<const Value *, 4> Operands;
@@ -1435,13 +1435,13 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
   }
   case Instruction::Freeze: {
     // This opcode is unknown. Assume that it is the same as 'mul'.
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
   }
   case Instruction::ICmp:
   case Instruction::FCmp: {
     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
     return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
                                       CostKind,
                                       {TTI::OK_AnyValue, TTI::OP_None},
@@ -1569,8 +1569,8 @@ InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
   }
 
   auto *SrcTy =
-      cast<VectorType>(ToVectorTy(Ctx.Types.inferScalarType(Operand), VF));
-  auto *DestTy = cast<VectorType>(ToVectorTy(getResultType(), VF));
+      cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(Operand), VF));
+  auto *DestTy = cast<VectorType>(toVectorTy(getResultType(), VF));
   // Arm TTI will use the underlying instruction to determine the cost.
   return Ctx.TTI.getCastInstrCost(
       Opcode, DestTy, SrcTy, CCH, TTI::TCK_RecipThroughput,
@@ -2078,8 +2078,8 @@ InstructionCost VPBlendRecipe::computeCost(ElementCount VF,
   if (vputils::onlyFirstLaneUsed(this))
     return Ctx.TTI.getCFInstrCost(Instruction::PHI, CostKind);
 
-  Type *ResultTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
-  Type *CmpTy = ToVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
+  Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
   return (getNumIncomingValues() - 1) *
          Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
                                     CmpInst::BAD_ICMP_PREDICATE, CostKind);
@@ -2200,7 +2200,7 @@ InstructionCost VPReductionRecipe::computeCost(ElementCount VF,
                                                VPCostContext &Ctx) const {
   RecurKind RdxKind = RdxDesc.getRecurrenceKind();
   Type *ElementTy = Ctx.Types.inferScalarType(this);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ElementTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   unsigned Opcode = RdxDesc.getOpcode();
 
@@ -2452,7 +2452,7 @@ void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
 
 InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
                                                  VPCostContext &Ctx) const {
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -2599,7 +2599,7 @@ InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
   // legacy model, it will always calculate the cost of mask.
   // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
   // don't need to compare to the legacy cost model.
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -2720,7 +2720,7 @@ InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
   // legacy model, it will always calculate the cost of mask.
   // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
   // don't need to compare to the legacy cost model.
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
      getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -3088,7 +3088,7 @@ InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF,
   Type *ValTy = Ctx.Types.inferScalarType(
       getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
                                 : getStoredValues()[InsertPosIdx]);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -3331,7 +3331,7 @@ VPFirstOrderRecurrencePHIRecipe::computeCost(ElementCount VF,
   SmallVector<int> Mask(VF.getKnownMinValue());
   std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
   Type *VectorTy =
-      ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
+      toVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
 
   return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
                                 cast<VectorType>(VectorTy), Mask, CostKind,
