@@ -594,12 +594,13 @@ class EarlyCSE {
     unsigned Generation = 0;
     int MatchingId = -1;
     bool IsAtomic = false;
+    bool IsLoad = false;
 
     LoadValue() = default;
     LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
-              bool IsAtomic)
+              bool IsAtomic, bool IsLoad)
         : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
-          IsAtomic(IsAtomic) {}
+          IsAtomic(IsAtomic), IsLoad(IsLoad) {}
   };
 
   using LoadMapAllocator =
@@ -1492,8 +1493,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
           LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
           continue;
         }
-        if (auto *I = dyn_cast<Instruction>(Op))
-          combineMetadataForCSE(I, &Inst, false);
+        if (InVal.IsLoad)
+          if (auto *I = dyn_cast<Instruction>(Op))
+            combineMetadataForCSE(I, &Inst, false);
         if (!Inst.use_empty())
           Inst.replaceAllUsesWith(Op);
         salvageKnowledge(&Inst, &AC);
@@ -1508,7 +1510,8 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
       AvailableLoads.insert(MemInst.getPointerOperand(),
                             LoadValue(&Inst, CurrentGeneration,
                                       MemInst.getMatchingId(),
-                                      MemInst.isAtomic()));
+                                      MemInst.isAtomic(),
+                                      MemInst.isLoad()));
       LastStore = nullptr;
       continue;
     }
@@ -1632,7 +1635,8 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
         AvailableLoads.insert(MemInst.getPointerOperand(),
                               LoadValue(&Inst, CurrentGeneration,
                                         MemInst.getMatchingId(),
-                                        MemInst.isAtomic()));
+                                        MemInst.isAtomic(),
+                                        MemInst.isLoad()));
 
         // Remember that this was the last unordered store we saw for DSE. We
         // don't yet handle DSE on ordered or volatile stores since we don't
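
For context, a minimal standalone sketch of the pattern this diff introduces: the cached available value records whether it was produced by a load, and metadata merging is only attempted when it was. The names below (`AvailableValue`, `mergeLoadMetadata`) and the simplified fields are illustrative assumptions, not the actual LLVM API beyond what the diff itself shows.

```cpp
// Illustrative sketch only (not the LLVM patch): an available-value record
// that remembers whether it came from a load, so metadata merging is skipped
// when a load is replaced by a value forwarded from a store.
#include <initializer_list>
#include <iostream>

struct AvailableValue {
  unsigned Generation = 0;
  int MatchingId = -1;
  bool IsAtomic = false;
  bool IsLoad = false; // the flag the patch threads through

  AvailableValue() = default;
  AvailableValue(unsigned Generation, int MatchingId, bool IsAtomic,
                 bool IsLoad)
      : Generation(Generation), MatchingId(MatchingId), IsAtomic(IsAtomic),
        IsLoad(IsLoad) {}
};

// Stand-in for combineMetadataForCSE: merging load metadata only makes sense
// when the earlier available value was itself a load instruction.
static void mergeLoadMetadata() { std::cout << "metadata merged\n"; }

int main() {
  AvailableValue FromLoad(/*Generation=*/1, /*MatchingId=*/-1,
                          /*IsAtomic=*/false, /*IsLoad=*/true);
  AvailableValue FromStore(/*Generation=*/1, /*MatchingId=*/-1,
                           /*IsAtomic=*/false, /*IsLoad=*/false);

  for (const AvailableValue &InVal : {FromLoad, FromStore})
    if (InVal.IsLoad) // the gate added in front of the metadata merge
      mergeLoadMetadata();
  // Prints "metadata merged" once: only the load-produced value qualifies.
}
```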