@@ -101,16 +101,16 @@ Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
 enum MaskedICmpType {
-  AMask_AllOnes           =     1,
-  AMask_NotAllOnes        =     2,
-  BMask_AllOnes           =     4,
-  BMask_NotAllOnes        =     8,
-  Mask_AllZeros           =    16,
-  Mask_NotAllZeros        =    32,
-  AMask_Mixed             =    64,
-  AMask_NotMixed          =   128,
-  BMask_Mixed             =   256,
-  BMask_NotMixed          =   512
+  AMask_AllOnes = 1,
+  AMask_NotAllOnes = 2,
+  BMask_AllOnes = 4,
+  BMask_NotAllOnes = 8,
+  Mask_AllZeros = 16,
+  Mask_NotAllZeros = 32,
+  AMask_Mixed = 64,
+  AMask_NotMixed = 128,
+  BMask_Mixed = 256,
+  BMask_NotMixed = 512
 };
 
 /// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
@@ -172,15 +172,16 @@ static unsigned conjugateICmpMask(unsigned Mask) {
              << 1;
 
   NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
-                      AMask_NotMixed | BMask_NotMixed))
-             >> 1;
+                      AMask_NotMixed | BMask_NotMixed)) >>
+             1;
 
   return NewMask;
 }
 
 // Adapts the external decomposeBitTestICmp for local use.
-static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
-                                 Value *&X, Value *&Y, Value *&Z) {
+static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
+                                 CmpInst::Predicate &Pred, Value *&X, Value *&Y,
+                                 Value *&Z) {
   APInt Mask;
   if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
     return false;
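
The enum packs each pattern next to its negation: every "Not" flag is its positive counterpart shifted left by one bit, which is exactly what conjugateICmpMask exploits above with one shift in each direction. A standalone sketch of that round trip (plain C++ mirroring the enum values from this diff, not linked against LLVM):

#include <cassert>

// Values copied from the MaskedICmpType enum in this diff.
enum MaskedICmpType {
  AMask_AllOnes = 1,
  AMask_NotAllOnes = 2,
  BMask_AllOnes = 4,
  BMask_NotAllOnes = 8,
  Mask_AllZeros = 16,
  Mask_NotAllZeros = 32,
  AMask_Mixed = 64,
  AMask_NotMixed = 128,
  BMask_Mixed = 256,
  BMask_NotMixed = 512
};

// Swap each pattern with its negated twin: positive flags move up one bit,
// "Not" flags move down one bit.
static unsigned conjugate(unsigned Mask) {
  unsigned NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                              AMask_Mixed | BMask_Mixed))
                     << 1;
  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed)) >>
             1;
  return NewMask;
}

int main() {
  assert(conjugate(AMask_AllOnes) == AMask_NotAllOnes);
  assert(conjugate(Mask_AllZeros | BMask_Mixed) ==
         (Mask_NotAllZeros | BMask_NotMixed));
  // Conjugation is an involution over the ten flag bits.
  for (unsigned M = 0; M < 1024; ++M)
    assert(conjugate(conjugate(M)) == M);
  return 0;
}
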
@@ -519,9 +520,9 @@ static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
   if (Mask == 0) {
     // Even if the two sides don't share a common pattern, check if folding can
     // still happen.
-    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
-            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
-            Builder))
+    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(LHS, RHS, IsAnd, A, B, C, D,
+                                                    E, PredL, PredR, LHSMask,
+                                                    RHSMask, Builder))
       return V;
     return nullptr;
   }
@@ -680,16 +681,16 @@ Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
   if (!RangeStart)
     return nullptr;
 
-  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
-                               Cmp0->getPredicate());
+  ICmpInst::Predicate Pred0 =
+      (Inverted ? Cmp0->getInversePredicate() : Cmp0->getPredicate());
 
   // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
   if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
         (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
     return nullptr;
 
-  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
-                               Cmp1->getPredicate());
+  ICmpInst::Predicate Pred1 =
+      (Inverted ? Cmp1->getInversePredicate() : Cmp1->getPredicate());
 
   Value *Input = Cmp0->getOperand(0);
   Value *RangeEnd;
@@ -707,9 +708,14 @@ Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
   // Check the upper range comparison, e.g. x < n
   ICmpInst::Predicate NewPred;
   switch (Pred1) {
-  case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
-  case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
-  default: return nullptr;
+  case ICmpInst::ICMP_SLT:
+    NewPred = ICmpInst::ICMP_ULT;
+    break;
+  case ICmpInst::ICMP_SLE:
+    NewPred = ICmpInst::ICMP_ULE;
+    break;
+  default:
+    return nullptr;
   }
 
   // This simplification is only valid if the upper range is not negative.
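
simplifyRangeCheck replaces a signed two-sided range test with a single unsigned comparison. A minimal sketch of the underlying identity, assuming the guard the function checks (the upper bound n is not negative):

#include <cassert>
#include <cstdint>

int main() {
  // x >= 0 && x < n  ==>  (unsigned)x < (unsigned)n, valid only when n >= 0:
  // a negative x reinterprets as a huge unsigned value and fails the test.
  for (int32_t n = 0; n < 100; ++n)
    for (int32_t x = -200; x < 200; ++x)
      assert((x >= 0 && x < n) ==
             (static_cast<uint32_t>(x) < static_cast<uint32_t>(n)));
  return 0;
}
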
@@ -785,8 +791,7 @@ Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
   if (L2 == R1)
     std::swap(L1, L2);
 
-  if (L1 == R1 &&
-      isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
+  if (L1 == R1 && isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
       isKnownToBeAPowerOfTwo(R2, false, 0, CxtI)) {
     // If this is a logical and/or, then we must prevent propagation of a
     // poison value from the RHS by inserting freeze.
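
The isKnownToBeAPowerOfTwo guards matter because a single-bit mask makes the usual masked-compare variants coincide; the folded form itself lies outside this hunk, but the precondition can be sketched standalone:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t P = 8; // a power of two: exactly one set bit
  for (uint32_t x = 0; x < 256; ++x) {
    // With a single-bit mask, "any masked bit set" and "all masked bits set"
    // are the same test...
    assert(((x & P) != 0) == ((x & P) == P));
  }
  // ...but not for a multi-bit mask such as 12.
  assert(((6u & 12u) != 0) && ((6u & 12u) != 12u));
  return 0;
}
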
@@ -1636,8 +1641,8 @@ static Instruction *reassociateFCmps(BinaryOperator &BO,
 
   // Match inner binop and the predicate for combining 2 NAN checks into 1.
   Value *BO10, *BO11;
-  FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
-                                                           : FCmpInst::FCMP_UNO;
+  FCmpInst::Predicate NanPred =
+      Opcode == Instruction::And ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO;
   if (!match(Op0, m_SpecificFCmp(NanPred, m_Value(X), m_AnyZeroFP())) ||
       !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
     return nullptr;
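
For and, the matched NaN check is fcmp ord X, 0.0, true exactly when X is not NaN, and two such checks can merge into one ordered comparison of the two values. A sketch using the C++ equivalents (x == x is the classic not-NaN test; assumes default, non-fast-math floating point):

#include <cassert>
#include <cmath>

int main() {
  const double Vals[] = {0.0, 1.5, -2.0, NAN};
  for (double X : Vals)
    for (double Y : Vals) {
      bool NotNanX = (X == X); // fcmp ord X, 0.0
      bool NotNanY = (Y == Y); // fcmp ord Y, 0.0
      // fcmp ord X, Y: neither operand is NaN.
      bool OrdXY = !std::isnan(X) && !std::isnan(Y);
      assert((NotNanX && NotNanY) == OrdXY);
    }
  return 0;
}
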
@@ -1666,8 +1671,7 @@ static Instruction *reassociateFCmps(BinaryOperator &BO,
 /// Match variations of De Morgan's Laws:
 /// (~A & ~B) == (~(A | B))
 /// (~A | ~B) == (~(A & B))
-static Instruction *matchDeMorgansLaws(BinaryOperator &I,
-                                       InstCombiner &IC) {
+static Instruction *matchDeMorgansLaws(BinaryOperator &I, InstCombiner &IC) {
   const Instruction::BinaryOps Opcode = I.getOpcode();
   assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
          "Trying to match De Morgan's Laws with something other than and/or");
@@ -1841,10 +1845,10 @@ Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
   Value *Cast1Src = Cast1->getOperand(0);
 
   // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
-  if ((Cast0->hasOneUse() || Cast1->hasOneUse()) &&
-      shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
-    Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
-                                       I.getName());
+  if ((Cast0->hasOneUse() || Cast1->hasOneUse()) && shouldOptimizeCast(Cast0) &&
+      shouldOptimizeCast(Cast1)) {
+    Value *NewOp =
+        Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src, I.getName());
     return CastInst::Create(CastOpcode, NewOp, DestTy);
   }
 
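
The transform sinks the logic op below the cast. Sketched here for a truncating cast (i32 to i8), one case this fold covers; the choice of cast kind is an assumption for the example:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t a = 0; a < 1024; a += 7)
    for (uint32_t b = 0; b < 1024; b += 13) {
      // logic(trunc(A), trunc(B)) == trunc(logic(A, B))
      assert((uint8_t(a) & uint8_t(b)) == uint8_t(a & b));
      assert((uint8_t(a) | uint8_t(b)) == uint8_t(a | b));
      assert((uint8_t(a) ^ uint8_t(b)) == uint8_t(a ^ b));
    }
  return 0;
}
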
@@ -2530,7 +2534,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
       int Log2ShiftC = ShiftC->exactLogBase2();
       int Log2C = C->exactLogBase2();
       bool IsShiftLeft =
-         cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
+          cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
       int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
       assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
       Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum));
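
BitNum is the one shift amount that lands the shifted constant's bit on the mask's bit: log2(C) - log2(ShiftC) for shl, and the reverse for a right shift. A sketch with concrete single-bit constants:

#include <cassert>

int main() {
  // shl case: ShiftC = 2 (log2 = 1), C = 16 (log2 = 4).
  // (2 << X) & 16 != 0  iff  X == 4 - 1 == 3.
  for (unsigned X = 0; X < 8; ++X)
    assert((((2u << X) & 16u) != 0) == (X == 3));
  // lshr case: ShiftC = 16 (log2 = 4), C = 2 (log2 = 1).
  // (16 >> X) & 2 != 0  iff  X == 4 - 1 == 3.
  for (unsigned X = 0; X < 8; ++X)
    assert((((16u >> X) & 2u) != 0) == (X == 3));
  return 0;
}
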
@@ -3475,8 +3479,8 @@ Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
     }
   } else {
     if ((TrueIfSignedL && !TrueIfSignedR &&
-        match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
-        match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) ||
+         match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
+         match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) ||
         (!TrueIfSignedL && TrueIfSignedR &&
          match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
          match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) {
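
The matcher pairs a sign test of (X & Y) with one of (X | Y); the fold works because the sign bit distributes over and/or. The replacement itself lies outside this hunk, but the bit-level fact can be checked standalone:

#include <cassert>
#include <cstdint>

int main() {
  const int8_t Vals[] = {-128, -77, -1, 0, 1, 42, 127};
  for (int8_t X : Vals)
    for (int8_t Y : Vals) {
      // sign(X & Y) == sign(X) AND sign(Y)
      assert(((int8_t)(X & Y) < 0) == ((X < 0) && (Y < 0)));
      // sign(X | Y) == sign(X) OR sign(Y)
      assert(((int8_t)(X | Y) < 0) == ((X < 0) || (Y < 0)));
    }
  return 0;
}
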
@@ -4163,8 +4167,8 @@ Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
       isSignBitCheck(PredL, *LC, TrueIfSignedL) &&
       isSignBitCheck(PredR, *RC, TrueIfSignedR)) {
     Value *XorLR = Builder.CreateXor(LHS0, RHS0);
-    return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR) :
-                                            Builder.CreateIsNotNeg(XorLR);
+    return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR)
+                                          : Builder.CreateIsNotNeg(XorLR);
   }
 
   // Fold (icmp pred1 X, C1) ^ (icmp pred2 X, C2)
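
The fold is fully visible here: xor of two sign-bit checks becomes a single sign test of X ^ Y, negative when the two checks share polarity and non-negative otherwise. A sketch:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t Vals[] = {INT32_MIN, -5, -1, 0, 1, 7, INT32_MAX};
  for (int32_t X : Vals)
    for (int32_t Y : Vals) {
      uint32_t XorXY = uint32_t(X) ^ uint32_t(Y);
      bool XorNeg = (XorXY >> 31) != 0;
      // Same polarity, e.g. (X < 0) ^ (Y < 0)  ==>  IsNeg(X ^ Y).
      assert(((X < 0) != (Y < 0)) == XorNeg);
      // Mixed polarity, e.g. (X < 0) ^ (Y >= 0)  ==>  IsNotNeg(X ^ Y).
      assert(((X < 0) != (Y >= 0)) == !XorNeg);
    }
  return 0;
}
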
@@ -4343,8 +4347,8 @@ static Instruction *canonicalizeAbs(BinaryOperator &Xor,
   Type *Ty = Xor.getType();
   Value *A;
   const APInt *ShAmt;
-  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
-      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
+  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) && Op1->hasNUses(2) &&
+      *ShAmt == Ty->getScalarSizeInBits() - 1 &&
       match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
     // Op1 = ashr i32 A, 31   ; smear the sign bit
     // xor (add A, Op1), Op1  ; add -1 and flip bits if negative
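
The two comment lines describe the classic branchless abs that canonicalizeAbs recognizes. A sketch (assumes arithmetic right shift of negative signed values, as on LLVM targets; C++20 guarantees it):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  for (int32_t A = -1000; A <= 1000; ++A) {
    int32_t M = A >> 31;       // ashr A, 31: 0 or -1 (smeared sign bit)
    int32_t Abs = (A + M) ^ M; // xor (add A, M), M
    assert(Abs == std::abs(A));
  }
  return 0;
}
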
@@ -4580,7 +4584,8 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
 
   // Move a 'not' ahead of casts of a bool to enable logic reduction:
   // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
-  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) && X->getType()->isIntOrIntVectorTy(1)) {
+  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
+      X->getType()->isIntOrIntVectorTy(1)) {
     Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
     Value *NotX = Builder.CreateNot(X);
     Value *Sext = Builder.CreateSExt(NotX, SextTy);
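
Since sext i1 true is all-ones, flipping the bool before the extension equals flipping every bit after it. A sketch modeling i1 -> i32 as bool -> int32_t:

#include <cassert>
#include <cstdint>

int32_t sextI1(bool B) { return B ? -1 : 0; } // sext i1 -> i32

int main() {
  for (bool X : {false, true})
    assert(~sextI1(X) == sextI1(!X)); // not (sext X) == sext (not X)
  return 0;
}
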
@@ -4693,7 +4698,21 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   // calls in there are unnecessary as SimplifyDemandedInstructionBits should
   // have already taken care of those cases.
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-  Value *M;
+  Value *X, *Y, *M;
+
+  // (A | B) ^ C -> (A ^ C) ^ B
+  // C ^ (A | B) -> B ^ (A ^ C)
+  if (match(&I, m_c_Xor(m_OneUse(m_c_DisjointOr(m_Value(X), m_Value(Y))),
+                        m_Value(M)))) {
+    if (Value *XorAC = simplifyBinOp(Instruction::Xor, X, M, SQ)) {
+      return BinaryOperator::CreateXor(XorAC, Y);
+    }
+
+    if (Value *XorBC = simplifyBinOp(Instruction::Xor, Y, M, SQ)) {
+      return BinaryOperator::CreateXor(XorBC, X);
+    }
+  }
+
   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
                         m_c_And(m_Deferred(M), m_Value())))) {
     if (isGuaranteedNotToBeUndef(M))
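
The new fold leans on the disjoint flag: when A and B share no set bits, A | B equals A ^ B, so C can be reassociated onto whichever operand simplifyBinOp manages to fold it with. A standalone sketch of the identity:

#include <cassert>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      if ((A & B) != 0)
        continue; // only "or disjoint" qualifies
      for (unsigned C = 0; C < 256; C += 37) {
        // (A | B) ^ C == (A ^ C) ^ B == (B ^ C) ^ A
        assert(((A | B) ^ C) == ((A ^ C) ^ B));
        assert(((A | B) ^ C) == ((B ^ C) ^ A));
      }
    }
  return 0;
}
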
@@ -4705,7 +4724,6 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *Xor = visitMaskedMerge(I, Builder))
     return Xor;
 
-  Value *X, *Y;
   Constant *C1;
   if (match(Op1, m_Constant(C1))) {
     Constant *C2;
@@ -4870,14 +4888,14 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                         m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
-    return BinaryOperator::CreateXor(
-        Builder.CreateAnd(Builder.CreateNot(A), C), B);
+    return BinaryOperator::CreateXor(Builder.CreateAnd(Builder.CreateNot(A), C),
+                                     B);
 
   // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                         m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
-    return BinaryOperator::CreateXor(
-        Builder.CreateAnd(Builder.CreateNot(B), C), A);
+    return BinaryOperator::CreateXor(Builder.CreateAnd(Builder.CreateNot(B), C),
+                                     A);
 
   // (A & B) ^ (A ^ B) -> (A | B)
   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
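
The three identities in this hunk, checked exhaustively on 8-bit A and B (C is sampled to keep the run short):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B)
      for (unsigned C = 0; C < 256; C += 85) {
        // (A ^ B) ^ (A | C) --> (~A & C) ^ B
        assert(uint8_t((A ^ B) ^ (A | C)) == uint8_t((~A & C) ^ B));
        // (A ^ B) ^ (B | C) --> (~B & C) ^ A
        assert(uint8_t((A ^ B) ^ (B | C)) == uint8_t((~B & C) ^ A));
        // (A & B) ^ (A ^ B) --> (A | B)
        assert(uint8_t((A & B) ^ (A ^ B)) == uint8_t(A | B));
      }
  return 0;
}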