
[LV] Rename ToVectorTy to toVectorTy (NFC) #120404

Merged (1 commit, Dec 23, 2024)
10 changes: 5 additions & 5 deletions llvm/include/llvm/IR/VectorTypeUtils.h
@@ -16,14 +16,14 @@ namespace llvm {
/// A helper function for converting Scalar types to vector types. If
/// the incoming type is void, we return void. If the EC represents a
/// scalar, we return the scalar type.
-inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
+inline Type *toVectorTy(Type *Scalar, ElementCount EC) {
if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
return Scalar;
return VectorType::get(Scalar, EC);
}

-inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
-return ToVectorTy(Scalar, ElementCount::getFixed(VF));
+inline Type *toVectorTy(Type *Scalar, unsigned VF) {
+return toVectorTy(Scalar, ElementCount::getFixed(VF));
}

/// A helper for converting structs of scalar types to structs of vector types.
@@ -41,7 +41,7 @@ Type *toScalarizedStructTy(StructType *StructTy);
bool isVectorizedStructTy(StructType *StructTy);

/// A helper for converting to vectorized types. For scalar types, this is
-/// equivalent to calling `ToVectorTy`. For struct types, this returns a new
+/// equivalent to calling `toVectorTy`. For struct types, this returns a new
/// struct where each element type has been widened to a vector type.
/// Note:
/// - If the incoming type is void, we return void
@@ -50,7 +50,7 @@ bool isVectorizedStructTy(StructType *StructTy);
inline Type *toVectorizedTy(Type *Ty, ElementCount EC) {
if (StructType *StructTy = dyn_cast<StructType>(Ty))
return toVectorizedStructTy(StructTy, EC);
-return ToVectorTy(Ty, EC);
+return toVectorTy(Ty, EC);
}

/// A helper for converting vectorized types to scalarized (non-vector) types.
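To make the renamed helpers concrete, here is a small usage sketch. It is illustrative only and not part of this patch; it assumes the declarations above plus an LLVMContext.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/VectorTypeUtils.h"

using namespace llvm;

void demoToVectorTy(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);

  // Fixed width: i32 widens to <4 x i32>.
  Type *V4I32 = toVectorTy(I32, ElementCount::getFixed(4));

  // Scalable width: i32 widens to <vscale x 2 x i32>.
  Type *NxV2I32 = toVectorTy(I32, ElementCount::getScalable(2));

  // Void and scalar element counts pass through unchanged.
  Type *StillVoid = toVectorTy(Type::getVoidTy(Ctx), ElementCount::getFixed(4));
  Type *StillI32 = toVectorTy(I32, ElementCount::getFixed(1));

  // toVectorizedTy additionally handles struct results: {float, i32}
  // widened by a fixed factor of 4 becomes {<4 x float>, <4 x i32>}.
  StructType *STy = StructType::get(Type::getFloatTy(Ctx), I32);
  Type *WideSTy = toVectorizedTy(STy, ElementCount::getFixed(4));

  (void)V4I32; (void)NxV2I32; (void)StillVoid; (void)StillI32; (void)WideSTy;
}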
58 changes: 29 additions & 29 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1251,8 +1251,8 @@ class LoopVectorizationCostModel {
return false;

// Get the source and destination types of the truncate.
-Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
-Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
+Type *SrcTy = toVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
+Type *DestTy = toVectorTy(cast<CastInst>(I)->getDestTy(), VF);

// If the truncate is free for the given types, return false. Replacing a
// free truncate with an induction variable would add an induction variable
@@ -3526,14 +3526,14 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
}
InstructionCost SafeDivisorCost = 0;

-auto *VecTy = ToVectorTy(I->getType(), VF);
+auto *VecTy = toVectorTy(I->getType(), VF);

// The cost of the select guard to ensure all lanes are well defined
// after we speculate above any internal control flow.
-SafeDivisorCost += TTI.getCmpSelInstrCost(
-Instruction::Select, VecTy,
-ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
-CmpInst::BAD_ICMP_PREDICATE, CostKind);
+SafeDivisorCost +=
+TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
+toVectorTy(Type::getInt1Ty(I->getContext()), VF),
+CmpInst::BAD_ICMP_PREDICATE, CostKind);

// Certain instructions can be cheaper to vectorize if they have a constant
// second vector operand. One example of this are shifts on x86.
@@ -4654,7 +4654,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
}

auto WillWiden = [&TTI, VF](Type *ScalarTy) {
-Type *VectorTy = ToVectorTy(ScalarTy, VF);
+Type *VectorTy = toVectorTy(ScalarTy, VF);
unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
if (!NumLegalParts)
return false;
@@ -5645,7 +5645,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
ScalarCost += TTI.getScalarizationOverhead(
-cast<VectorType>(ToVectorTy(I->getType(), VF)),
+cast<VectorType>(toVectorTy(I->getType(), VF)),
APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ true,
/*Extract*/ false, CostKind);
ScalarCost +=
@@ -5664,7 +5664,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
Worklist.push_back(J);
else if (needsExtract(J, VF)) {
ScalarCost += TTI.getScalarizationOverhead(
-cast<VectorType>(ToVectorTy(J->getType(), VF)),
+cast<VectorType>(toVectorTy(J->getType(), VF)),
APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
/*Extract*/ true, CostKind);
}
@@ -5775,7 +5775,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,

unsigned AS = getLoadStoreAddressSpace(I);
Value *Ptr = getLoadStorePointerOperand(I);
-Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
+Type *PtrTy = toVectorTy(Ptr->getType(), VF);
// NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
// that it is being called from this specific place.

@@ -5826,7 +5826,7 @@ InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
ElementCount VF) {
Type *ValTy = getLoadStoreType(I);
-auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
Value *Ptr = getLoadStorePointerOperand(I);
unsigned AS = getLoadStoreAddressSpace(I);
int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
@@ -5858,7 +5858,7 @@ LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
assert(Legal->isUniformMemOp(*I, VF));

Type *ValTy = getLoadStoreType(I);
-auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
const Align Alignment = getLoadStoreAlignment(I);
unsigned AS = getLoadStoreAddressSpace(I);
enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
@@ -5884,7 +5884,7 @@ InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
ElementCount VF) {
Type *ValTy = getLoadStoreType(I);
-auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
const Align Alignment = getLoadStoreAlignment(I);
const Value *Ptr = getLoadStorePointerOperand(I);

@@ -5902,7 +5902,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,

Instruction *InsertPos = Group->getInsertPos();
Type *ValTy = getLoadStoreType(InsertPos);
-auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
unsigned AS = getLoadStoreAddressSpace(InsertPos);
enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

@@ -6147,7 +6147,7 @@ InstructionCost LoopVectorizationCostModel::getScalarizationOverhead(
return 0;

InstructionCost Cost = 0;
-Type *RetTy = ToVectorTy(I->getType(), VF);
+Type *RetTy = toVectorTy(I->getType(), VF);
if (!RetTy->isVoidTy() &&
(!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
Cost += TTI.getScalarizationOverhead(
@@ -6413,9 +6413,9 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {

bool MaskRequired = Legal->isMaskRequired(CI);
// Compute corresponding vector type for return value and arguments.
-Type *RetTy = ToVectorTy(ScalarRetTy, VF);
+Type *RetTy = toVectorTy(ScalarRetTy, VF);
for (Type *ScalarTy : ScalarTys)
-Tys.push_back(ToVectorTy(ScalarTy, VF));
+Tys.push_back(toVectorTy(ScalarTy, VF));

// An in-loop reduction using an fmuladd intrinsic is a special case;
// we don't want the normal cost for that intrinsic.
@@ -6605,7 +6605,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
HasSingleCopyAfterVectorization(I, VF));
VectorTy = RetTy;
} else
-VectorTy = ToVectorTy(RetTy, VF);
+VectorTy = toVectorTy(RetTy, VF);

if (VF.isVector() && VectorTy->isVectorTy() &&
!TTI.getNumberOfParts(VectorTy))
@@ -6665,8 +6665,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
return Switch->getNumCases() *
TTI.getCmpSelInstrCost(
Instruction::ICmp,
-ToVectorTy(Switch->getCondition()->getType(), VF),
-ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
+toVectorTy(Switch->getCondition()->getType(), VF),
+toVectorTy(Type::getInt1Ty(I->getContext()), VF),
CmpInst::ICMP_EQ, CostKind);
}
case Instruction::PHI: {
@@ -6711,8 +6711,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
}
return (Phi->getNumIncomingValues() - 1) *
TTI.getCmpSelInstrCost(
-Instruction::Select, ToVectorTy(ResultTy, VF),
-ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
+Instruction::Select, toVectorTy(ResultTy, VF),
+toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
CmpInst::BAD_ICMP_PREDICATE, CostKind);
}

@@ -6721,8 +6721,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
if (VF.isVector() && foldTailWithEVL() &&
Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
IntrinsicCostAttributes ICA(
-Intrinsic::vp_merge, ToVectorTy(Phi->getType(), VF),
-{ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
+Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
+{toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
return TTI.getIntrinsicInstrCost(ICA, CostKind);
}

@@ -6862,7 +6862,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
}

-VectorTy = ToVectorTy(ValTy, VF);
+VectorTy = toVectorTy(ValTy, VF);
return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
cast<CmpInst>(I)->getPredicate(), CostKind,
{TTI::OK_AnyValue, TTI::OP_None},
@@ -6880,7 +6880,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
if (Decision == CM_Scalarize)
Width = ElementCount::getFixed(1);
}
-VectorTy = ToVectorTy(getLoadStoreType(I), Width);
+VectorTy = toVectorTy(getLoadStoreType(I), Width);
return getMemoryInstructionCost(I, VF);
}
case Instruction::BitCast:
@@ -6961,7 +6961,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
SrcScalarTy =
IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
Type *SrcVecTy =
-VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
+VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;

if (canTruncateToMinimalBitwidth(I, VF)) {
// If the result type is <= the source type, there will be no extend
@@ -7490,7 +7490,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
// Pre-compute the cost for I, if it has a reduction pattern cost.
for (Instruction *I : ChainOpsAndOperands) {
auto ReductionCost = CM.getReductionPatternCost(
-I, VF, ToVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
+I, VF, toVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
if (!ReductionCost)
continue;

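All of these call sites follow the same cost-model idiom: widen a scalar type to the candidate VF with toVectorTy, then ask TargetTransformInfo to price the widened operation. A minimal sketch of the pattern from the getDivRemSpeculationCost hunk above; costOfGuardSelect is a hypothetical helper, not code from this patch:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/VectorTypeUtils.h"

using namespace llvm;

// Price the select that guards a speculated divide: a select producing
// VF lanes of ValTy, with an i1 condition widened to VF lanes.
static InstructionCost costOfGuardSelect(const TargetTransformInfo &TTI,
                                         Type *ValTy, ElementCount VF,
                                         LLVMContext &Ctx) {
  Type *VecTy = toVectorTy(ValTy, VF);
  Type *CondTy = toVectorTy(Type::getInt1Ty(Ctx), VF);
  return TTI.getCmpSelInstrCost(Instruction::Select, VecTy, CondTy,
                                CmpInst::BAD_ICMP_PREDICATE,
                                TargetTransformInfo::TCK_RecipThroughput);
}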
34 changes: 17 additions & 17 deletions llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1030,11 +1030,11 @@ InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
Arguments.push_back(V);
}

-Type *RetTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+Type *RetTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
SmallVector<Type *> ParamTys;
for (unsigned I = 0; I != getNumOperands(); ++I)
ParamTys.push_back(
-ToVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
+toVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));

// TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
@@ -1202,7 +1202,7 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
SelectInst *SI = cast<SelectInst>(getUnderlyingValue());
bool ScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
Type *ScalarTy = Ctx.Types.inferScalarType(this);
-Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

VPValue *Op0, *Op1;
@@ -1383,7 +1383,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
switch (Opcode) {
case Instruction::FNeg: {
-Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
return Ctx.TTI.getArithmeticInstrCost(
Opcode, VectorTy, CostKind,
{TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
@@ -1421,7 +1421,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
getOperand(1)->isDefinedOutsideLoopRegions())
RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
-Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());

SmallVector<const Value *, 4> Operands;
@@ -1434,13 +1434,13 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
}
case Instruction::Freeze: {
// This opcode is unknown. Assume that it is the same as 'mul'.
-Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
}
case Instruction::ICmp:
case Instruction::FCmp: {
Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
-Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
+Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
CostKind,
{TTI::OK_AnyValue, TTI::OP_None},
@@ -1568,8 +1568,8 @@ InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
}

auto *SrcTy =
-cast<VectorType>(ToVectorTy(Ctx.Types.inferScalarType(Operand), VF));
-auto *DestTy = cast<VectorType>(ToVectorTy(getResultType(), VF));
+cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(Operand), VF));
+auto *DestTy = cast<VectorType>(toVectorTy(getResultType(), VF));
// Arm TTI will use the underlying instruction to determine the cost.
return Ctx.TTI.getCastInstrCost(
Opcode, DestTy, SrcTy, CCH, TTI::TCK_RecipThroughput,
@@ -2077,8 +2077,8 @@ InstructionCost VPBlendRecipe::computeCost(ElementCount VF,
if (vputils::onlyFirstLaneUsed(this))
return Ctx.TTI.getCFInstrCost(Instruction::PHI, CostKind);

-Type *ResultTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
-Type *CmpTy = ToVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
+Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
+Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
return (getNumIncomingValues() - 1) *
Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
CmpInst::BAD_ICMP_PREDICATE, CostKind);
@@ -2199,7 +2199,7 @@ InstructionCost VPReductionRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
RecurKind RdxKind = RdxDesc.getRecurrenceKind();
Type *ElementTy = Ctx.Types.inferScalarType(this);
-auto *VectorTy = cast<VectorType>(ToVectorTy(ElementTy, VF));
+auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
unsigned Opcode = RdxDesc.getOpcode();

@@ -2450,7 +2450,7 @@ void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,

InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
-Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
const Align Alignment =
getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
unsigned AS =
@@ -2597,7 +2597,7 @@ InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
// legacy model, it will always calculate the cost of mask.
// TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
// don't need to compare to the legacy cost model.
-Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
const Align Alignment =
getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
unsigned AS =
@@ -2718,7 +2718,7 @@ InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
// legacy model, it will always calculate the cost of mask.
// TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
// don't need to compare to the legacy cost model.
-Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
const Align Alignment =
getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
unsigned AS =
@@ -3086,7 +3086,7 @@ InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF,
Type *ValTy = Ctx.Types.inferScalarType(
getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
: getStoredValues()[InsertPosIdx]);
-auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
unsigned AS = getLoadStoreAddressSpace(InsertPos);
enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

@@ -3347,7 +3347,7 @@
SmallVector<int> Mask(VF.getKnownMinValue());
std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
Type *VectorTy =
ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
toVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);

return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
cast<VectorType>(VectorTy), Mask, CostKind,
Expand Down
Loading