@@ -1410,8 +1410,7 @@ class LoopVectorizationCostModel {
   /// Returns true if \p I is an instruction that needs to be predicated
   /// at runtime. The result is independent of the predication mechanism.
   /// Superset of instructions that return true for isScalarWithPredication.
-  bool isPredicatedInst(Instruction *I, ElementCount VF,
-                        bool IsKnownUniform = false) const;
+  bool isPredicatedInst(Instruction *I) const;
 
   /// Return the costs for our two available strategies for lowering a
   /// div/rem operation which requires speculating at least one lane.
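The doc comment pins down an invariant worth keeping in mind while reading the rest of the diff: isScalarWithPredication accepts a strict subset of what isPredicatedInst accepts. A minimal standalone sketch of that relation, using a hypothetical stand-in for the cost model (the real queries live on LoopVectorizationCostModel):

#include <cassert>

// Hypothetical stand-in for LoopVectorizationCostModel, illustration only.
struct SketchCostModel {
  enum Op { UDiv, MaskedStore };
  // Both ops need predication at runtime in this toy model.
  bool isPredicatedInst(Op O) const { return O == UDiv || O == MaskedStore; }
  // Subset: only ops with no non-scalar predicated lowering. A predicated
  // udiv still has a vector "safe divisor" lowering, so it is excluded.
  bool isScalarWithPredication(Op O) const { return O == MaskedStore; }
};

int main() {
  SketchCostModel CM;
  for (auto O : {SketchCostModel::UDiv, SketchCostModel::MaskedStore})
    // The documented superset property: scalar-with-predication implies
    // predicated, never the other way around.
    assert(!CM.isScalarWithPredication(O) || CM.isPredicatedInst(O));
}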
@@ -3650,7 +3649,7 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
 
 bool LoopVectorizationCostModel::isScalarWithPredication(
     Instruction *I, ElementCount VF) const {
-  if (!isPredicatedInst(I, VF))
+  if (!isPredicatedInst(I))
     return false;
 
   // Do we have a non-scalar lowering for this predicated
@@ -3689,9 +3688,7 @@ bool LoopVectorizationCostModel::isScalarWithPredication(
   }
 }
 
-bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I,
-                                                  ElementCount VF,
-                                                  bool IsKnownUniform) const {
+bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
   if (!blockNeedsPredicationForAnyReason(I->getParent()))
     return false;
 
@@ -3939,7 +3936,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
                         << *I << "\n");
       return;
     }
-    if (isPredicatedInst(I, VF, true)) {
+    if (isPredicatedInst(I)) {
       LLVM_DEBUG(
           dbgs() << "LV: Found not uniform due to requiring predication: " << *I
                  << "\n");
@@ -5674,7 +5671,7 @@ bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
   // from moving "masked load/store" check from legality to cost model.
   // Masked Load/Gather emulation was previously never allowed.
   // Limited number of Masked Store/Scatter emulation was allowed.
-  assert((isPredicatedInst(I, VF)) &&
+  assert((isPredicatedInst(I)) &&
          "Expecting a scalar emulated instruction");
   return isa<LoadInst>(I) ||
          (isa<StoreInst>(I) &&
@@ -5953,7 +5950,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
   // If we have a predicated load/store, it will need extra i1 extracts and
   // conditional branches, but may not be executed for each vector lane. Scale
   // the cost by the probability of executing the predicated block.
-  if (isPredicatedInst(I, VF)) {
+  if (isPredicatedInst(I)) {
     Cost /= getReciprocalPredBlockProb();
 
     // Add the cost of an i1 extract and a branch
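For context on the arithmetic in this hunk: getReciprocalPredBlockProb() in upstream LLVM returns 2, encoding the assumption that a predicated block executes on roughly half the iterations, so the scalarized cost is halved. A minimal sketch of that scaling, with a hypothetical starting cost:

#include <cstdint>
#include <iostream>

// Matches the upstream helper's contract: the block is assumed to execute
// with probability 1/2, so the reciprocal of that probability is 2.
static unsigned getReciprocalPredBlockProb() { return 2; }

int main() {
  uint64_t Cost = 8; // hypothetical cost of the fully scalarized load/store
  // Scale by the probability that the predicated block executes at all;
  // iterations whose mask bit is off never pay for the memory operation.
  Cost /= getReciprocalPredBlockProb();
  std::cout << "scaled cost: " << Cost << "\n"; // prints 4
}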
@@ -6813,7 +6810,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
   case Instruction::SDiv:
   case Instruction::URem:
   case Instruction::SRem:
-    if (VF.isVector() && isPredicatedInst(I, VF)) {
+    if (VF.isVector() && isPredicatedInst(I)) {
       const auto [ScalarCost, SafeDivisorCost] = getDivRemSpeculationCost(I, VF);
       return isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost) ?
         ScalarCost : SafeDivisorCost;
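The helper named here just compares the two candidate costs; upstream, isDivRemScalarWithPredication reduces to ScalarCost < SafeDivisorCost. A sketch of the selection with hypothetical cost numbers (the real values come from TTI queries):

#include <iostream>

// Mirrors the upstream comparison: prefer scalarizing the predicated div/rem
// only when that beats widening with a select-formed safe divisor.
static bool isDivRemScalarWithPredication(unsigned ScalarCost,
                                          unsigned SafeDivisorCost) {
  return ScalarCost < SafeDivisorCost;
}

int main() {
  // Hypothetical per-VF costs for the two lowering strategies.
  unsigned ScalarCost = 24, SafeDivisorCost = 10;
  unsigned Chosen = isDivRemScalarWithPredication(ScalarCost, SafeDivisorCost)
                        ? ScalarCost
                        : SafeDivisorCost;
  std::cout << "chosen lowering cost: " << Chosen << "\n"; // prints 10
}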
@@ -8485,7 +8482,7 @@ bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
 
 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
                                            ArrayRef<VPValue *> Operands,
-                                           VPBasicBlock *VPBB, VFRange &Range) {
+                                           VPBasicBlock *VPBB) {
   switch (I->getOpcode()) {
   default:
     return nullptr;
@@ -8495,10 +8492,7 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
   case Instruction::URem: {
     // If not provably safe, use a select to form a safe divisor before widening the
     // div/rem operation itself. Otherwise fall through to general handling below.
-    bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
-        [&](ElementCount VF) -> bool { return CM.isPredicatedInst(I, VF); },
-        Range);
-    if (IsPredicated) {
+    if (CM.isPredicatedInst(I)) {
       SmallVector<VPValue *> Ops(Operands.begin(), Operands.end());
       VPValue *Mask = getBlockInMask(I->getParent());
       VPValue *One =
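The recipe under construction here replaces the divisor with select(mask, divisor, 1), so the widened division cannot trap on a masked-off lane. A scalar model of the same trick, with hypothetical names:

#include <cassert>
#include <cstdint>

// Scalar model of the safe-divisor lowering: an inactive lane divides by 1
// instead of its (possibly zero) real divisor, so the division itself is
// safe to speculate; the masked-off result is discarded later anyway.
static uint32_t safeUDivLane(uint32_t X, uint32_t Divisor, bool LaneActive) {
  uint32_t SafeDivisor = LaneActive ? Divisor : 1; // the "select" step
  return X / SafeDivisor;
}

int main() {
  assert(safeUDivLane(10, 2, /*LaneActive=*/true) == 5);
  // Inactive lane with a zero divisor: the select substitutes 1, no trap.
  assert(safeUDivLane(10, 0, /*LaneActive=*/false) == 10);
}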
@@ -8548,8 +8542,8 @@ VPReplicateRecipe *VPRecipeBuilder::handleReplication(Instruction *I,
       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
       Range);
 
-  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
-      [&](ElementCount VF) { return CM.isPredicatedInst(I, VF); }, Range);
+  bool IsPredicated = CM.isPredicatedInst(I);
+
   // Even if the instruction is not marked as uniform, there are certain
   // intrinsic calls that can be effectively treated as such, so we check for
   // them here. Conservatively, we only do this for scalable vectors, since
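Worth noting what getDecisionAndClampRange was doing before this hunk simplified the call away: it evaluates a per-VF predicate across the candidate range and shrinks the range so one VPlan only covers VFs that agree on the decision. A simplified model of that idiom, with plain unsigned VFs standing in for ElementCount:

#include <functional>
#include <iostream>

struct VFRange { unsigned Start, End; }; // half-open [Start, End), powers of 2

// Simplified model of LoopVectorizationPlanner::getDecisionAndClampRange:
// return the decision at Range.Start and clamp Range.End at the first VF
// whose decision differs, so the remaining range is decision-uniform.
static bool getDecisionAndClampRange(
    const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
  bool Decision = Predicate(Range.Start);
  for (unsigned VF = Range.Start * 2; VF < Range.End; VF *= 2)
    if (Predicate(VF) != Decision) {
      Range.End = VF;
      break;
    }
  return Decision;
}

int main() {
  VFRange Range{2, 16};
  // Hypothetical decision that flips at VF = 8.
  bool Predicated =
      getDecisionAndClampRange([](unsigned VF) { return VF >= 8; }, Range);
  // Prints "0 [2,8)": not predicated, for the clamped range of VFs.
  std::cout << Predicated << " [" << Range.Start << "," << Range.End << ")\n";
}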
@@ -8675,7 +8669,7 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
                                 *CI);
   }
 
-  return tryToWiden(Instr, Operands, VPBB, Range);
+  return tryToWiden(Instr, Operands, VPBB);
 }
 
 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,