@@ -6065,24 +6065,23 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
   auto *SE = PSE.getSE();
 
-  auto HasSingleCopyAfterVectorization = [this](Instruction *I,
-                                                ElementCount VF) -> bool {
-    if (VF.isScalar())
-      return true;
-
-    auto Scalarized = InstsToScalarize.find(VF);
-    assert(Scalarized != InstsToScalarize.end() &&
-           "VF not yet analyzed for scalarization profitability");
-    return !Scalarized->second.count(I) &&
-           llvm::all_of(I->users(), [&](User *U) {
-             auto *UI = cast<Instruction>(U);
-             return !Scalarized->second.count(UI);
-           });
-  };
-  (void)HasSingleCopyAfterVectorization;
-
   Type *VectorTy;
   if (isScalarAfterVectorization(I, VF)) {
+    [[maybe_unused]] auto HasSingleCopyAfterVectorization =
+        [this](Instruction *I, ElementCount VF) -> bool {
+      if (VF.isScalar())
+        return true;
+
+      auto Scalarized = InstsToScalarize.find(VF);
+      assert(Scalarized != InstsToScalarize.end() &&
+             "VF not yet analyzed for scalarization profitability");
+      return !Scalarized->second.count(I) &&
+             llvm::all_of(I->users(), [&](User *U) {
+               auto *UI = cast<Instruction>(U);
+               return !Scalarized->second.count(UI);
+             });
+    };
+
     // With the exception of GEPs and PHIs, after scalarization there should
     // only be one copy of the instruction generated in the loop. This is
     // because the VF is either 1, or any instructions that need scalarizing
@@ -6342,8 +6341,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     Type *ValTy = I->getOperand(0)->getType();
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
-      Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
-      (void)Op0AsInstruction;
+      [[maybe_unused]] Instruction *Op0AsInstruction =
+          dyn_cast<Instruction>(I->getOperand(0));
       assert((!canTruncateToMinimalBitwidth(Op0AsInstruction, VF) ||
               MinBWs[I] == MinBWs[Op0AsInstruction]) &&
              "if both the operand and the compare are marked for "
@@ -7277,8 +7276,8 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
   MainResumeValue = EpiRedHeaderPhi->getStartValue()->getUnderlyingValue();
   if (RecurrenceDescriptor::isAnyOfRecurrenceKind(
           RdxDesc.getRecurrenceKind())) {
-    Value *StartV = EpiRedResult->getOperand(1)->getLiveInIRValue();
-    (void)StartV;
+    [[maybe_unused]] Value *StartV =
+        EpiRedResult->getOperand(1)->getLiveInIRValue();
     auto *Cmp = cast<ICmpInst>(MainResumeValue);
     assert(Cmp->getPredicate() == CmpInst::ICMP_NE &&
            "AnyOf expected to start with ICMP_NE");
@@ -7292,15 +7291,14 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
     Value *SentinelV = EpiRedResult->getOperand(2)->getLiveInIRValue();
     using namespace llvm::PatternMatch;
     Value *Cmp, *OrigResumeV, *CmpOp;
-    bool IsExpectedPattern =
+    [[maybe_unused]] bool IsExpectedPattern =
         match(MainResumeValue,
               m_Select(m_OneUse(m_Value(Cmp)), m_Specific(SentinelV),
                        m_Value(OrigResumeV))) &&
         (match(Cmp, m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(OrigResumeV),
                                    m_Value(CmpOp))) &&
          ((CmpOp == StartV && isGuaranteedNotToBeUndefOrPoison(CmpOp))));
     assert(IsExpectedPattern && "Unexpected reduction resume pattern");
-    (void)IsExpectedPattern;
     MainResumeValue = OrigResumeV;
   }
   PHINode *MainResumePhi = cast<PHINode>(MainResumeValue);
@@ -8255,10 +8253,10 @@ VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R,
   SmallVector<VPValue *, 4> Operands(R->operands());
   if (auto *PhiR = dyn_cast<VPWidenPHIRecipe>(R)) {
     VPBasicBlock *Parent = PhiR->getParent();
-    VPRegionBlock *LoopRegionOf = Parent->getEnclosingLoopRegion();
+    [[maybe_unused]] VPRegionBlock *LoopRegionOf =
+        Parent->getEnclosingLoopRegion();
     assert(LoopRegionOf && LoopRegionOf->getEntry() == Parent &&
            "Non-header phis should have been handled during predication");
-    (void)LoopRegionOf;
     auto *Phi = cast<PHINode>(R->getUnderlyingInstr());
     assert(Operands.size() == 2 && "Must have 2 operands for header phis");
     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
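
For reference, every hunk above applies the same transformation: a variable that is only read inside an assert used to be followed by a (void)Var; statement so that NDEBUG builds, where the assert is compiled out, do not raise -Wunused-variable; the C++17 [[maybe_unused]] attribute states that intent on the declaration itself. A minimal standalone sketch of the two idioms, not part of this patch and using a hypothetical helper isPositive in place of the real checks:

#include <cassert>

// Hypothetical helper; only used inside the asserts below.
static bool isPositive(int X) { return X > 0; }

void oldIdiom(int V) {
  bool Ok = isPositive(V);
  (void)Ok; // needed so release (NDEBUG) builds do not warn about Ok
  assert(Ok && "expected a positive value");
}

void newIdiom(int V) {
  // [[maybe_unused]] documents the intent on the declaration itself,
  // so no separate (void) statement is required.
  [[maybe_unused]] bool Ok = isPositive(V);
  assert(Ok && "expected a positive value");
}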