@@ -800,8 +800,13 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
800
800
Value *Ptr, PredicatedScalarEvolution &PSE) {
801
801
// The access function must stride over the innermost loop.
802
802
if (Lp != AR->getLoop ()) {
803
- LLVM_DEBUG (dbgs () << " LAA: Bad stride - Not striding over innermost loop "
804
- << *Ptr << " SCEV: " << *AR << " \n " );
803
+ LLVM_DEBUG ({
804
+ dbgs () << " LAA: Bad stride - Not striding over innermost loop " ;
805
+ if (Ptr)
806
+ dbgs () << *Ptr << " " ;
807
+
808
+ dbgs () << " SCEV: " << *AR << " \n " ;
809
+ });
805
810
return std::nullopt;
806
811
}
807
812
@@ -811,8 +816,12 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
811
816
// Calculate the pointer stride and check if it is constant.
812
817
const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
813
818
if (!C) {
814
- LLVM_DEBUG (dbgs () << " LAA: Bad stride - Not a constant strided " << *Ptr
815
- << " SCEV: " << *AR << " \n " );
819
+ LLVM_DEBUG ({
820
+ dbgs () << " LAA: Bad stride - Not a constant strided " ;
821
+ if (Ptr)
822
+ dbgs () << *Ptr << " " ;
823
+ dbgs () << " SCEV: " << *AR << " \n " ;
824
+ });
816
825
return std::nullopt;
817
826
}
818
827
@@ -839,8 +848,8 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
839
848
static bool isNoWrapGEP (Value *Ptr, PredicatedScalarEvolution &PSE,
840
849
const Loop *L);
841
850
842
- // / Check whether \p AR is a non-wrapping AddRec, or if \p Ptr is a non-wrapping
843
- // / GEP .
851
+ // / Check whether a pointer address cannot wrap. If \p Ptr is not nullptr, use
852
+ // / information from the IR pointer value to determine no-wrap .
844
853
static bool isNoWrap (PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
845
854
Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
846
855
std::optional<int64_t > Stride = std::nullopt) {
@@ -861,7 +870,7 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
861
870
// location will be larger than half the pointer index type space. In that
862
871
// case, the GEP would be poison and any memory access dependent on it would
863
872
// be immediate UB when executed.
864
- if (auto *GEP = dyn_cast <GetElementPtrInst>(Ptr);
873
+ if (auto *GEP = dyn_cast_if_present <GetElementPtrInst>(Ptr);
865
874
GEP && GEP->hasNoUnsignedSignedWrap ())
866
875
return true ;
867
876
@@ -877,6 +886,9 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
877
886
return true ;
878
887
}
879
888
889
+ if (!Ptr)
890
+ return false ;
891
+
880
892
if (Assume) {
881
893
PSE.setNoOverflow (Ptr, SCEVWrapPredicate::IncrementNUSW);
882
894
LLVM_DEBUG (dbgs () << " LAA: Pointer may wrap:\n "
@@ -1144,13 +1156,10 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1144
1156
1145
1157
// When we run after a failing dependency check we have to make sure
1146
1158
// we don't have wrapping pointers.
1147
- if (ShouldCheckWrap) {
1148
- // Skip wrap checking when translating pointers.
1149
- if (TranslatedPtrs.size () > 1 )
1150
- return false ;
1151
-
1152
- if (!isNoWrap (PSE, AR, Ptr, AccessTy, TheLoop, Assume))
1153
- return false ;
1159
+ if (ShouldCheckWrap &&
1160
+ !isNoWrap (PSE, AR, TranslatedPtrs.size () == 1 ? Ptr : nullptr , AccessTy,
1161
+ TheLoop, Assume)) {
1162
+ return false ;
1154
1163
}
1155
1164
}
1156
1165
@@ -1457,6 +1466,9 @@ void AccessAnalysis::processMemAccesses() {
1457
1466
// / Check whether \p Ptr is non-wrapping GEP.
1458
1467
static bool isNoWrapGEP (Value *Ptr, PredicatedScalarEvolution &PSE,
1459
1468
const Loop *L) {
1469
+ if (PSE.hasNoOverflow (Ptr, SCEVWrapPredicate::IncrementNUSW))
1470
+ return true ;
1471
+
1460
1472
// Scalar evolution does not propagate the non-wrapping flags to values that
1461
1473
// are derived from a non-wrapping induction variable because non-wrapping
1462
1474
// could be flow-sensitive.
0 commit comments