@@ -6072,24 +6072,23 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
   auto *SE = PSE.getSE();
 
-  auto HasSingleCopyAfterVectorization = [this](Instruction *I,
-                                                ElementCount VF) -> bool {
-    if (VF.isScalar())
-      return true;
-
-    auto Scalarized = InstsToScalarize.find(VF);
-    assert(Scalarized != InstsToScalarize.end() &&
-           "VF not yet analyzed for scalarization profitability");
-    return !Scalarized->second.count(I) &&
-           llvm::all_of(I->users(), [&](User *U) {
-             auto *UI = cast<Instruction>(U);
-             return !Scalarized->second.count(UI);
-           });
-  };
-  (void)HasSingleCopyAfterVectorization;
-
   Type *VectorTy;
   if (isScalarAfterVectorization(I, VF)) {
+    [[maybe_unused]] auto HasSingleCopyAfterVectorization =
+        [this](Instruction *I, ElementCount VF) -> bool {
+          if (VF.isScalar())
+            return true;
+
+          auto Scalarized = InstsToScalarize.find(VF);
+          assert(Scalarized != InstsToScalarize.end() &&
+                 "VF not yet analyzed for scalarization profitability");
+          return !Scalarized->second.count(I) &&
+                 llvm::all_of(I->users(), [&](User *U) {
+                   auto *UI = cast<Instruction>(U);
+                   return !Scalarized->second.count(UI);
+                 });
+        };
+
     // With the exception of GEPs and PHIs, after scalarization there should
     // only be one copy of the instruction generated in the loop. This is
     // because the VF is either 1, or any instructions that need scalarizing
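
Every hunk in this commit applies the same mechanical change: a value that exists only to feed an assert was previously kept "used" via a trailing (void)Var; cast and is now annotated [[maybe_unused]] (C++17) at its declaration instead. A minimal self-contained sketch of the before/after shapes, using hypothetical names rather than anything from this patch:

    #include <cassert>

    void before(int *P) {
      int *Copy = P; // only read by the assert below
      assert(Copy != nullptr && "expected a valid pointer");
      (void)Copy;    // old style: silences -Wunused-variable when NDEBUG removes the assert
    }

    void after(int *P) {
      [[maybe_unused]] int *Copy = P; // new style: intent stated at the declaration
      assert(Copy != nullptr && "expected a valid pointer");
    }
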
@@ -6348,8 +6347,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     Type *ValTy = I->getOperand(0)->getType();
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
-      Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
-      (void)Op0AsInstruction;
+      [[maybe_unused]] Instruction *Op0AsInstruction =
+          dyn_cast<Instruction>(I->getOperand(0));
       assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
               MinBWs[I] == MinBWs[Op0AsInstruction]) &&
              "if both the operand and the compare are marked for "
@@ -7283,8 +7282,8 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
   MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
   if (RecurrenceDescriptor::isAnyOfRecurrenceKind(
           RdxDesc.getRecurrenceKind())) {
-    Value *StartV = EpiRedResult->getOperand(1)->getLiveInIRValue();
-    (void)StartV;
+    [[maybe_unused]] Value *StartV =
+        EpiRedResult->getOperand(1)->getLiveInIRValue();
     auto *Cmp = cast<ICmpInst>(MainResumeValue);
     assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
            "AnyOf expected to start with ICMP_NE");
@@ -7298,15 +7297,14 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
     Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
     using namespace llvm::PatternMatch;
     Value *Cmp, *OrigResumeV, *CmpOp;
-    bool IsExpectedPattern =
+    [[maybe_unused]] bool IsExpectedPattern =
         match(MainResumeValue,
               m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
                        m_Value(OrigResumeV))) &&
         (match(Cmp, m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(OrigResumeV),
                                    m_Value(CmpOp))) &&
          ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
     assert(IsExpectedPattern && "Unexpected reduction resume pattern");
-    (void)IsExpectedPattern;
     MainResumeValue = OrigResumeV;
   }
   PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
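
The reason these variables look unused in the first place is that assert compiles to a no-op under -DNDEBUG, so values such as StartV and IsExpectedPattern, whose only consumer is the assert, would otherwise trigger -Wunused-variable in release builds. A rough stand-alone illustration, assuming a hypothetical check rather than the actual reduction-resume matching above:

    #include <cassert>

    int scaleNonNegative(int X) {
      // Read by the assert when asserts are enabled; with -DNDEBUG the
      // attribute keeps the otherwise-unused variable warning-free.
      [[maybe_unused]] bool IsExpectedInput = X >= 0;
      assert(IsExpectedInput && "negative input not expected");
      return X * 2;
    }
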
@@ -8260,10 +8258,10 @@ VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R,
   SmallVector<VPValue *, 4> Operands(R->operands());
   if (auto *PhiR = dyn_cast<VPWidenPHIRecipe>(R)) {
     VPBasicBlock *Parent = PhiR->getParent();
-    VPRegionBlock *LoopRegionOf = Parent->getEnclosingLoopRegion();
+    [[maybe_unused]] VPRegionBlock *LoopRegionOf =
+        Parent->getEnclosingLoopRegion();
     assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
            "Non-header phis should have been handled during predication");
-    (void)LoopRegionOf;
     auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
     assert(Operands.size() == 2 && "Must have 2 operands for header phis");
     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))