@@ -688,29 +688,35 @@ class EarlyCSE {
688
688
public:
689
689
/// Wraps \p Inst and, when it is a target memory intrinsic that \p TTI
/// recognizes (TTI.getTgtMemIntrinsic succeeds), caches the intrinsic ID in
/// IntrID and the intrinsic's memory behavior in Info. For ordinary
/// instructions IntrID stays 0 and queries fall back to the instruction.
ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
    : Inst(Inst) {
  auto *IntrInst = dyn_cast<IntrinsicInst>(Inst);
  if (IntrInst && TTI.getTgtMemIntrinsic(IntrInst, Info))
    IntrID = IntrInst->getIntrinsicID();
}
695
696
697
+ Instruction *get () { return Inst; }
698
+ const Instruction *get () const { return Inst; }
699
+
696
700
bool isLoad () const {
697
- if (IsTargetMemInst) return Info.ReadMem ;
701
+ if (IntrID != 0 )
702
+ return Info.ReadMem ;
698
703
return isa<LoadInst>(Inst);
699
704
}
700
705
701
706
bool isStore () const {
702
- if (IsTargetMemInst) return Info.WriteMem ;
707
+ if (IntrID != 0 )
708
+ return Info.WriteMem ;
703
709
return isa<StoreInst>(Inst);
704
710
}
705
711
706
712
bool isAtomic () const {
707
- if (IsTargetMemInst )
713
+ if (IntrID != 0 )
708
714
return Info.Ordering != AtomicOrdering::NotAtomic;
709
715
return Inst->isAtomic ();
710
716
}
711
717
712
718
bool isUnordered () const {
713
- if (IsTargetMemInst )
719
+ if (IntrID != 0 )
714
720
return Info.isUnordered ();
715
721
716
722
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
@@ -723,7 +729,7 @@ class EarlyCSE {
723
729
}
724
730
725
731
bool isVolatile () const {
726
- if (IsTargetMemInst )
732
+ if (IntrID != 0 )
727
733
return Info.IsVolatile ;
728
734
729
735
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
@@ -753,27 +759,31 @@ class EarlyCSE {
753
759
// field in the MemIntrinsicInfo structure. That field contains
754
760
// non-negative values only.
755
761
int getMatchingId () const {
756
- if (IsTargetMemInst) return Info.MatchingId ;
762
+ if (IntrID != 0 )
763
+ return Info.MatchingId ;
757
764
return -1 ;
758
765
}
759
766
760
767
/// The pointer this access operates on: Info.PtrVal for recognized target
/// intrinsics, otherwise the load/store pointer operand (null when Inst is
/// neither — see getLoadStorePointerOperand).
Value *getPointerOperand() const {
  return IntrID ? Info.PtrVal : getLoadStorePointerOperand(Inst);
}
764
772
765
773
bool mayReadFromMemory () const {
766
- if (IsTargetMemInst) return Info.ReadMem ;
774
+ if (IntrID != 0 )
775
+ return Info.ReadMem ;
767
776
return Inst->mayReadFromMemory ();
768
777
}
769
778
770
779
bool mayWriteToMemory () const {
771
- if (IsTargetMemInst) return Info.WriteMem ;
780
+ if (IntrID != 0 )
781
+ return Info.WriteMem ;
772
782
return Inst->mayWriteToMemory ();
773
783
}
774
784
775
785
private:
  // Intrinsic ID of Inst when it is a target memory intrinsic that TTI
  // recognized in the constructor; 0 otherwise. All the accessors above key
  // off "IntrID != 0" to choose between Info and the raw instruction.
  Intrinsic::ID IntrID = 0;
  // Memory behavior filled in by TTI.getTgtMemIntrinsic; meaningful only
  // when IntrID != 0.
  MemIntrinsicInfo Info;
  // The wrapped instruction (never null; set in the constructor).
  Instruction *Inst;
};
@@ -783,6 +793,9 @@ class EarlyCSE {
783
793
bool handleBranchCondition (Instruction *CondInst, const BranchInst *BI,
784
794
const BasicBlock *BB, const BasicBlock *Pred);
785
795
796
+ Value *getMatchingValue (LoadValue &InVal, ParseMemoryInst &MemInst,
797
+ unsigned CurrentGeneration);
798
+
786
799
Value *getOrCreateResult (Value *Inst, Type *ExpectedType) const {
787
800
if (auto *LI = dyn_cast<LoadInst>(Inst))
788
801
return LI;
@@ -945,6 +958,33 @@ bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
945
958
return MadeChanges;
946
959
}
947
960
961
+ Value *EarlyCSE::getMatchingValue (LoadValue &InVal, ParseMemoryInst &MemInst,
962
+ unsigned CurrentGeneration) {
963
+ if (InVal.DefInst == nullptr )
964
+ return nullptr ;
965
+ if (InVal.MatchingId != MemInst.getMatchingId ())
966
+ return nullptr ;
967
+ // We don't yet handle removing loads with ordering of any kind.
968
+ if (MemInst.isVolatile () || !MemInst.isUnordered ())
969
+ return nullptr ;
970
+ // We can't replace an atomic load with one which isn't also atomic.
971
+ if (MemInst.isLoad () && !InVal.IsAtomic && MemInst.isAtomic ())
972
+ return nullptr ;
973
+ // The value V returned from this function is used differently depending
974
+ // on whether MemInst is a load or a store. If it's a load, we will replace
975
+ // MemInst with V, if it's a store, we will check if V is the same as the
976
+ // available value.
977
+ bool MemInstMatching = !MemInst.isLoad ();
978
+ Instruction *Matching = MemInstMatching ? MemInst.get () : InVal.DefInst ;
979
+ Instruction *Other = MemInstMatching ? InVal.DefInst : MemInst.get ();
980
+
981
+ if (!isOperatingOnInvariantMemAt (MemInst.get (), InVal.Generation ) &&
982
+ !isSameMemGeneration (InVal.Generation , CurrentGeneration, InVal.DefInst ,
983
+ MemInst.get ()))
984
+ return nullptr ;
985
+ return getOrCreateResult (Matching, Other->getType ());
986
+ }
987
+
948
988
bool EarlyCSE::processNode (DomTreeNode *Node) {
949
989
bool Changed = false ;
950
990
BasicBlock *BB = Node->getBlock ();
@@ -1161,32 +1201,21 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
1161
1201
// we can assume the current load loads the same value as the dominating
1162
1202
// load.
1163
1203
LoadValue InVal = AvailableLoads.lookup (MemInst.getPointerOperand ());
1164
- if (InVal.DefInst != nullptr &&
1165
- InVal.MatchingId == MemInst.getMatchingId () &&
1166
- // We don't yet handle removing loads with ordering of any kind.
1167
- !MemInst.isVolatile () && MemInst.isUnordered () &&
1168
- // We can't replace an atomic load with one which isn't also atomic.
1169
- InVal.IsAtomic >= MemInst.isAtomic () &&
1170
- (isOperatingOnInvariantMemAt (&Inst, InVal.Generation ) ||
1171
- isSameMemGeneration (InVal.Generation , CurrentGeneration,
1172
- InVal.DefInst , &Inst))) {
1173
- Value *Op = getOrCreateResult (InVal.DefInst , Inst.getType ());
1174
- if (Op != nullptr ) {
1175
- LLVM_DEBUG (dbgs () << " EarlyCSE CSE LOAD: " << Inst
1176
- << " to: " << *InVal.DefInst << ' \n ' );
1177
- if (!DebugCounter::shouldExecute (CSECounter)) {
1178
- LLVM_DEBUG (dbgs () << " Skipping due to debug counter\n " );
1179
- continue ;
1180
- }
1181
- if (!Inst.use_empty ())
1182
- Inst.replaceAllUsesWith (Op);
1183
- salvageKnowledge (&Inst, &AC);
1184
- removeMSSA (Inst);
1185
- Inst.eraseFromParent ();
1186
- Changed = true ;
1187
- ++NumCSELoad;
1204
+ if (Value *Op = getMatchingValue (InVal, MemInst, CurrentGeneration)) {
1205
+ LLVM_DEBUG (dbgs () << " EarlyCSE CSE LOAD: " << Inst
1206
+ << " to: " << *InVal.DefInst << ' \n ' );
1207
+ if (!DebugCounter::shouldExecute (CSECounter)) {
1208
+ LLVM_DEBUG (dbgs () << " Skipping due to debug counter\n " );
1188
1209
continue ;
1189
1210
}
1211
+ if (!Inst.use_empty ())
1212
+ Inst.replaceAllUsesWith (Op);
1213
+ salvageKnowledge (&Inst, &AC);
1214
+ removeMSSA (Inst);
1215
+ Inst.eraseFromParent ();
1216
+ Changed = true ;
1217
+ ++NumCSELoad;
1218
+ continue ;
1190
1219
}
1191
1220
1192
1221
// Otherwise, remember that we have this instruction.
@@ -1256,13 +1285,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
1256
1285
if (MemInst.isValid () && MemInst.isStore ()) {
1257
1286
LoadValue InVal = AvailableLoads.lookup (MemInst.getPointerOperand ());
1258
1287
if (InVal.DefInst &&
1259
- InVal.DefInst == getOrCreateResult (&Inst, InVal.DefInst ->getType ()) &&
1260
- InVal.MatchingId == MemInst.getMatchingId () &&
1261
- // We don't yet handle removing stores with ordering of any kind.
1262
- !MemInst.isVolatile () && MemInst.isUnordered () &&
1263
- (isOperatingOnInvariantMemAt (&Inst, InVal.Generation ) ||
1264
- isSameMemGeneration (InVal.Generation , CurrentGeneration,
1265
- InVal.DefInst , &Inst))) {
1288
+ InVal.DefInst == getMatchingValue (InVal, MemInst, CurrentGeneration)) {
1266
1289
// It is okay to have a LastStore to a different pointer here if MemorySSA
1267
1290
// tells us that the load and store are from the same memory generation.
1268
1291
// In that case, LastStore should keep its present value since we're
0 commit comments