@@ -6675,24 +6675,23 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
   auto *SE = PSE.getSE();
 
-  auto HasSingleCopyAfterVectorization = [this](Instruction *I,
-                                                ElementCount VF) -> bool {
-    if (VF.isScalar())
-      return true;
-
-    auto Scalarized = InstsToScalarize.find(VF);
-    assert(Scalarized != InstsToScalarize.end() &&
-           "VF not yet analyzed for scalarization profitability");
-    return !Scalarized->second.count(I) &&
-           llvm::all_of(I->users(), [&](User *U) {
-             auto *UI = cast<Instruction>(U);
-             return !Scalarized->second.count(UI);
-           });
-  };
-  (void)HasSingleCopyAfterVectorization;
-
   Type *VectorTy;
   if (isScalarAfterVectorization(I, VF)) {
+    [[maybe_unused]] auto HasSingleCopyAfterVectorization =
+        [this](Instruction *I, ElementCount VF) -> bool {
+      if (VF.isScalar())
+        return true;
+
+      auto Scalarized = InstsToScalarize.find(VF);
+      assert(Scalarized != InstsToScalarize.end() &&
+             "VF not yet analyzed for scalarization profitability");
+      return !Scalarized->second.count(I) &&
+             llvm::all_of(I->users(), [&](User *U) {
+               auto *UI = cast<Instruction>(U);
+               return !Scalarized->second.count(UI);
+             });
+    };
+
     // With the exception of GEPs and PHIs, after scalarization there should
     // only be one copy of the instruction generated in the loop. This is
     // because the VF is either 1, or any instructions that need scalarizing
@@ -6956,8 +6955,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     Type *ValTy = I->getOperand(0)->getType();
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
-      Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
-      (void)Op0AsInstruction;
+      [[maybe_unused]] Instruction *Op0AsInstruction =
+          dyn_cast<Instruction>(I->getOperand(0));
       assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
               MinBWs[I] == MinBWs[Op0AsInstruction]) &&
              "if both the operand and the compare are marked for "
@@ -7895,7 +7894,7 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
           RdxDesc.getRecurrenceKind())) {
     using namespace llvm::PatternMatch;
     Value *Cmp, *OrigResumeV, *CmpOp;
-    bool IsExpectedPattern =
+    [[maybe_unused]] bool IsExpectedPattern =
         match(MainResumeValue, m_Select(m_OneUse(m_Value(Cmp)),
                                         m_Specific(RdxDesc.getSentinelValue()),
                                         m_Value(OrigResumeV))) &&
@@ -7906,7 +7905,6 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
          (CmpOp == RdxDesc.getRecurrenceStartValue() &&
           isGuaranteedNotToBeUndefOrPoison(CmpOp))));
     assert(IsExpectedPattern && "Unexpected reduction resume pattern");
-    (void)IsExpectedPattern;
     MainResumeValue = OrigResumeV;
   }
   PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
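The common thread in these hunks is replacing the `(void)Var;` idiom, used to silence unused-variable warnings for values that only feed asserts in release builds, with the C++17 `[[maybe_unused]]` attribute on the declaration itself. A minimal standalone sketch of the two idioms follows; it is not LLVM code, and the function and variable names are invented for illustration.

#include <cassert>

int computeChecked(int X) {
  // Old idiom: declare the helper value, then cast it to void so that
  // builds with NDEBUG (where assert() compiles away) do not warn about
  // an otherwise-unused variable.
  bool IsNonNegativeOld = X >= 0;
  (void)IsNonNegativeOld;
  assert(IsNonNegativeOld && "expected a non-negative input");

  // New idiom: [[maybe_unused]] (C++17) states the same intent directly on
  // the declaration, without the extra cast statement.
  [[maybe_unused]] bool IsNonNegativeNew = X >= 0;
  assert(IsNonNegativeNew && "expected a non-negative input");

  return X * 2;
}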