[ValueTracking] Convert isKnownNonZero to use SimplifyQuery #85863

Merged
merged 4 commits on Apr 12, 2024
3 changes: 2 additions & 1 deletion clang/lib/CodeGen/CGCall.cpp
@@ -4124,7 +4124,8 @@ static bool isProvablyNull(llvm::Value *addr) {
}

static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
return llvm::isKnownNonZero(Addr.getBasePointer(), CGF.CGM.getDataLayout());
return llvm::isKnownNonZero(Addr.getBasePointer(), /*Depth=*/0,
CGF.CGM.getDataLayout());
}

/// Emit the actual writing-back of a writeback.
6 changes: 1 addition & 5 deletions llvm/include/llvm/Analysis/ValueTracking.h
@@ -124,11 +124,7 @@ bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
bool UseInstrInfo = true);
Contributor

Do you also need to erase the old API? computeKnownBits has both.

Member Author

I don't see any reason to keep the old API. It is unlikely to break downstream LLVM users' builds (except for some static analysis tools).

Contributor

There are a few places where it makes the code simpler (i.e., the cases with just DL and Depth == 0).
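
A minimal sketch of what such a convenience overload could look like (purely illustrative; this PR does not add it, and the exact name and placement are assumptions):

// Hypothetical wrapper for the common "DataLayout only, depth 0" call sites;
// it just builds the SimplifyQuery in place and forwards to the new API.
inline bool isKnownNonZero(const Value *V, const DataLayout &DL) {
  return isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL));
}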

bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q);

/// Return true if the two given values are negation.
/// Currently can recoginze Value pair:
3 changes: 2 additions & 1 deletion llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1283,7 +1283,8 @@ AliasResult BasicAAResult::aliasGEP(
// VarIndex = Scale*V.
const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
if (Var.Val.TruncBits == 0 &&
isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
isKnownNonZero(Var.Val.V, /*Depth=*/0,
SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
// Check if abs(V*Scale) >= abs(Scale) holds in the presence of
// potentially wrapping math.
auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
29 changes: 12 additions & 17 deletions llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1586,12 +1586,10 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
if (match(UnsignedICmp,
m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
EqPred == ICmpInst::ICMP_NE &&
isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, /*Depth=*/0, Q))
return UnsignedICmp;
if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
EqPred == ICmpInst::ICMP_EQ &&
isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, /*Depth=*/0, Q))
return UnsignedICmp;
}
}
@@ -1609,13 +1607,13 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
// X > Y && Y == 0 --> Y == 0 iff X != 0
// X > Y || Y == 0 --> X > Y iff X != 0
if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
isKnownNonZero(X, /*Depth=*/0, Q))
return IsAnd ? ZeroICmp : UnsignedICmp;

// X <= Y && Y != 0 --> X <= Y iff X != 0
// X <= Y || Y != 0 --> Y != 0 iff X != 0
if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
isKnownNonZero(X, /*Depth=*/0, Q))
return IsAnd ? UnsignedICmp : ZeroICmp;

// The transforms below here are expected to be handled more generally with
@@ -2821,11 +2819,10 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
// the other operand can not be based on the alloc - if it were, then
// the cmp itself would be a capture.
Value *MI = nullptr;
if (isAllocLikeFn(LHS, TLI) &&
llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, /*Depth=*/0, Q))
MI = LHS;
else if (isAllocLikeFn(RHS, TLI) &&
llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
llvm::isKnownNonZero(LHS, /*Depth=*/0, Q))
MI = RHS;
if (MI) {
// FIXME: This is incorrect, see PR54002. While we can assume that the
@@ -2981,12 +2978,12 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
if (isKnownNonZero(LHS, /*Depth=*/0, Q))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
if (isKnownNonZero(LHS, /*Depth=*/0, Q))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT: {
@@ -3001,8 +2998,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
if (LHSKnown.isNegative())
return getTrue(ITy);
if (LHSKnown.isNonNegative() &&
isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, /*Depth=*/0, Q))
return getFalse(ITy);
break;
}
@@ -3018,8 +3014,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
if (LHSKnown.isNegative())
return getFalse(ITy);
if (LHSKnown.isNonNegative() &&
isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, /*Depth=*/0, Q))
return getTrue(ITy);
break;
}
@@ -3172,7 +3167,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
const APInt *C;
if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
(match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
if (isKnownNonZero(RHS, /*Depth=*/0, Q)) {
switch (Pred) {
default:
break;
@@ -3405,7 +3400,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
!isKnownNonZero(LBO->getOperand(0), Q.DL))
!isKnownNonZero(LBO->getOperand(0), /*Depth=*/0, Q))
break;
if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
RBO->getOperand(1), Q, MaxRecurse - 1))
5 changes: 3 additions & 2 deletions llvm/lib/Analysis/LazyValueInfo.cpp
@@ -645,7 +645,7 @@ LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
// instruction is placed, even if it could legally be hoisted much higher.
// That is unfortunate.
PointerType *PT = dyn_cast<PointerType>(BBI->getType());
if (PT && isKnownNonZero(BBI, DL))
if (PT && isKnownNonZero(BBI, /*Depth=*/0, DL))
Contributor

Side note: We're currently inconsistent about this, but I think it would be best if we moved all the depth parameters after SQ, because end-users of the API don't need to specify depth.
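
A sketch of that suggested shape, assuming Depth moves behind the query and gets a default (this is not what the present PR implements):

// Hypothetical reordered declaration: end-users omit Depth entirely,
// while recursive analysis code can still thread it through explicitly.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);

// Typical call sites:
//   isKnownNonZero(X, Q);            // common case, no depth argument
//   isKnownNonZero(X, Q, Depth + 1); // inside a recursive helper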

Contributor

If you aren't going to implement that, I will after this patch lands.

Contributor

This PR makes it so that isKnownNonZero(BBI, DL) was valid in LLVM 18, used to be valid in LLVM 19, is currently an error in LLVM 19, but will become valid in LLVM 19 again, which is unfortunate and difficult to account for in downstream projects. @goldsteinn If you are going to change that, do you think it is worth updating downstream code to support the current API, or is that change going to land fast enough that we can just wait?
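
To make the concern concrete, a downstream project might bridge the two signatures with a small shim such as the following sketch (illustrative only; the version guard is an assumption and would need revisiting if the follow-up reordering lands in LLVM 19 as discussed above):

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"

// Hypothetical compatibility wrapper around the changing isKnownNonZero API.
static bool isKnownNonZeroCompat(const llvm::Value *V,
                                 const llvm::DataLayout &DL) {
#if LLVM_VERSION_MAJOR >= 19
  // Signature introduced by this PR: depth plus a SimplifyQuery.
  return llvm::isKnownNonZero(V, /*Depth=*/0, llvm::SimplifyQuery(DL));
#else
  // LLVM 18 and earlier: DataLayout with defaulted optional arguments.
  return llvm::isKnownNonZero(V, DL);
#endif
}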

Contributor

If I don't make the LLVM 19 cutoff, I'll drop it.

Contributor

Thanks, I had been slightly optimistic that this was just going to be a few days, but it sounds like it's going to be longer. In that case I will check what is easiest for us (which might be to submit that LLVM change ourselves).

Contributor

Created #88873 for it.

Contributor

Thank you for making the PR :)

return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));

if (BBI->getType()->isIntegerTy()) {
@@ -1863,7 +1863,8 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Module *M = CxtI->getModule();
const DataLayout &DL = M->getDataLayout();
if (V->getType()->isPointerTy() && C->isNullValue() &&
isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
isKnownNonZero(V->stripPointerCastsSameRepresentation(), /*Depth=*/0,
DL)) {
if (Pred == ICmpInst::ICMP_EQ)
return LazyValueInfo::False;
else if (Pred == ICmpInst::ICMP_NE)
6 changes: 4 additions & 2 deletions llvm/lib/Analysis/Loads.cpp
@@ -99,7 +99,8 @@ static bool isDereferenceableAndAlignedPointer(
CheckForFreed));
if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
!CheckForFreed)
if (!CheckForNonNull || isKnownNonZero(V, DL, 0, AC, CtxI, DT)) {
if (!CheckForNonNull ||
isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, DT, AC, CtxI))) {
// As we recursed through GEPs to get here, we've incrementally checked
// that each step advanced by a multiple of the alignment. If our base is
// properly aligned, then the original offset accessed must also be.
@@ -133,7 +134,8 @@ static bool isDereferenceableAndAlignedPointer(
if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
isKnownNonZero(V, DL, 0, AC, CtxI, DT) && !V->canBeFreed()) {
isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, DT, AC, CtxI)) &&
!V->canBeFreed()) {
// As we recursed through GEPs to get here, we've incrementally
// checked that each step advanced by a multiple of the alignment. If
// our base is properly aligned, then the original offset accessed
2 changes: 1 addition & 1 deletion llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6893,7 +6893,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
uint64_t Rem = MaxVal.urem(Align);
MaxVal -= APInt(BitWidth, Rem);
APInt MinVal = APInt::getZero(BitWidth);
if (llvm::isKnownNonZero(V, DL))
if (llvm::isKnownNonZero(V, /*Depth=*/0, DL))
MinVal = Align;
ConservativeResult = ConservativeResult.intersectWith(
ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
17 changes: 4 additions & 13 deletions llvm/lib/Analysis/ValueTracking.cpp
@@ -274,16 +274,6 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
unsigned Depth, const SimplifyQuery &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth,
const SimplifyQuery &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT, bool UseInstrInfo) {
return ::isKnownNonZero(
V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
unsigned Depth) {
return computeKnownBits(V, Depth, SQ).isNonNegative();
@@ -298,7 +288,7 @@ bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
// this updated.
KnownBits Known = computeKnownBits(V, Depth, SQ);
return Known.isNonNegative() &&
(Known.isNonZero() || ::isKnownNonZero(V, Depth, SQ));
(Known.isNonZero() || isKnownNonZero(V, Depth, SQ));
}

bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ,
@@ -3093,11 +3083,12 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
return false;
}

bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q) {
bool llvm::isKnownNonZero(const Value *V, unsigned Depth,
const SimplifyQuery &Q) {
auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
APInt DemandedElts =
FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
return isKnownNonZero(V, DemandedElts, Depth, Q);
return ::isKnownNonZero(V, DemandedElts, Depth, Q);
}

/// If the pair of operators are the same invertible function, return the
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2314,7 +2314,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,

// Bail if the value is never zero.
Use &Op = CountZeros->getOperandUse(0);
if (isKnownNonZero(Op, *DL))
if (isKnownNonZero(Op, /*Depth=*/0, *DL))
return false;

// The intrinsic will be sunk behind a compare against zero and branch.
5 changes: 3 additions & 2 deletions llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -2452,8 +2452,9 @@ bool AANonNull::isImpliedByIR(Attributor &A, const IRPosition &IRP,
}

if (llvm::any_of(Worklist, [&](AA::ValueAndContext VAC) {
return !isKnownNonZero(VAC.getValue(), A.getDataLayout(), 0, AC,
VAC.getCtxI(), DT);
return !isKnownNonZero(
VAC.getValue(), /*Depth=*/0,
SimplifyQuery(A.getDataLayout(), DT, AC, VAC.getCtxI()));
}))
return false;

2 changes: 1 addition & 1 deletion llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -1175,7 +1175,7 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
Value *RetVal = FlowsToReturn[i];

// If this value is locally known to be non-null, we're good
if (isKnownNonZero(RetVal, DL))
if (isKnownNonZero(RetVal, /*Depth=*/0, DL))
continue;

// Otherwise, we need to look upwards since we can't make any local
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -988,7 +988,7 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
if (C->isOne()) {
if (match(Op0, m_ZExt(m_Add(m_Value(X), m_AllOnes())))) {
const SimplifyQuery Q = SQ.getWithInstruction(&Add);
if (llvm::isKnownNonZero(X, DL, 0, Q.AC, Q.CxtI, Q.DT))
if (llvm::isKnownNonZero(X, /*Depth=*/0, Q))
return new ZExtInst(X, Ty);
}
}
8 changes: 2 additions & 6 deletions llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1031,10 +1031,6 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
!ICmpInst::isEquality(EqPred))
return nullptr;

auto IsKnownNonZero = [&](Value *V) {
return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
};

ICmpInst::Predicate UnsignedPred;

Value *A, *B;
@@ -1043,9 +1039,9 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
(ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
if (!IsKnownNonZero(NonZero))
if (!isKnownNonZero(NonZero, /*Depth=*/0, Q))
std::swap(NonZero, Other);
return IsKnownNonZero(NonZero);
return isKnownNonZero(NonZero, /*Depth=*/0, Q);
};

// Given ZeroCmpOp = (A + B)
13 changes: 8 additions & 5 deletions llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -601,8 +601,8 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
// then change the 'ZeroIsPoison' parameter to 'true'
// because we know the zero behavior can't affect the result.
if (!Known.One.isZero() ||
isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
&IC.getDominatorTree())) {
isKnownNonZero(Op0, /*Depth=*/0,
IC.getSimplifyQuery().getWithInstruction(&II))) {
if (!match(II.getArgOperand(1), m_One()))
return IC.replaceOperand(II, 1, IC.Builder.getTrue());
}
@@ -2061,7 +2061,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// See if we can deduce non-null.
if (!CI.hasRetAttr(Attribute::NonNull) &&
(Known.isNonZero() ||
isKnownNonZero(II, DL, /*Depth*/ 0, &AC, II, &DT))) {
isKnownNonZero(II, /*Depth=*/0,
getSimplifyQuery().getWithInstruction(II)))) {
CI.addRetAttr(Attribute::NonNull);
Changed = true;
}
@@ -3648,7 +3649,8 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
for (Value *V : Call.args()) {
if (V->getType()->isPointerTy() &&
!Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
isKnownNonZero(V, /*Depth=*/0,
getSimplifyQuery().getWithInstruction(&Call)))
ArgNos.push_back(ArgNo);
ArgNo++;
}
@@ -3828,7 +3830,8 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {

// isKnownNonNull -> nonnull attribute
if (!GCR.hasRetAttr(Attribute::NonNull) &&
isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
isKnownNonZero(DerivedPtr, /*Depth=*/0,
getSimplifyQuery().getWithInstruction(&Call))) {
GCR.addRetAttr(Attribute::NonNull);
// We discovered new fact, re-check users.
Worklist.pushUsersToWorkList(GCR);
21 changes: 9 additions & 12 deletions llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1273,12 +1273,12 @@ Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {

// if X non-zero and NoOverflow(X * Y)
// (icmp eq/ne Y)
if (!XKnown.One.isZero() || isKnownNonZero(X, DL, 0, Q.AC, Q.CxtI, Q.DT))
if (!XKnown.One.isZero() || isKnownNonZero(X, /*Depth=*/0, Q))
return new ICmpInst(Pred, Y, Cmp.getOperand(1));

// if Y non-zero and NoOverflow(X * Y)
// (icmp eq/ne X)
if (!YKnown.One.isZero() || isKnownNonZero(Y, DL, 0, Q.AC, Q.CxtI, Q.DT))
if (!YKnown.One.isZero() || isKnownNonZero(Y, /*Depth=*/0, Q))
return new ICmpInst(Pred, X, Cmp.getOperand(1));
}
// Note, we are skipping cases:
@@ -3087,7 +3087,7 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
// (X + -1) <u C --> X <=u C (if X is never null)
if (Pred == CmpInst::ICMP_ULT && C2->isAllOnes()) {
const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
if (llvm::isKnownNonZero(X, DL, 0, Q.AC, Q.CxtI, Q.DT))
if (llvm::isKnownNonZero(X, /*Depth=*/0, Q))
return new ICmpInst(ICmpInst::ICMP_ULE, X, ConstantInt::get(Ty, C));
}

@@ -4275,8 +4275,7 @@ static Value *foldICmpWithLowBitMaskedVal(ICmpInst::Predicate Pred, Value *Op0,

// Look for: x & ~Mask pred ~Mask
if (isMaskOrZero(X, /*Not=*/true, Q)) {
return !ICmpInst::isSigned(Pred) ||
isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
return !ICmpInst::isSigned(Pred) || isKnownNonZero(X, /*Depth=*/0, Q);
}
return false;
}
@@ -4780,8 +4779,7 @@ static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q,
// icmp (X ^ Y_NonZero) s>= X --> icmp (X ^ Y_NonZero) s> X
// icmp (X ^ Y_NonZero) s<= X --> icmp (X ^ Y_NonZero) s< X
CmpInst::Predicate PredOut = CmpInst::getStrictPredicate(Pred);
if (PredOut != Pred &&
isKnownNonZero(A, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
if (PredOut != Pred && isKnownNonZero(A, /*Depth=*/0, Q))
return new ICmpInst(PredOut, Op0, Op1);

return nullptr;
@@ -5064,11 +5062,11 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
return new ICmpInst(Pred, C, D);
// (A - B) u>=/u< A --> B u>/u<= A iff B != 0
if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
isKnownNonZero(B, /*Depth=*/0, Q))
return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
// C u<=/u> (C - D) --> C u</u>= D iff B != 0
if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
isKnownNonZero(D, /*Depth=*/0, Q))
return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);

// icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
@@ -5110,14 +5108,13 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
// X * Z eq/ne Y * Z -> X eq/ne Y
if (ZKnown.countMaxTrailingZeros() == 0)
return new ICmpInst(Pred, X, Y);
NonZero = !ZKnown.One.isZero() ||
isKnownNonZero(Z, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
NonZero = !ZKnown.One.isZero() || isKnownNonZero(Z, /*Depth=*/0, Q);
// if Z != 0 and nsw(X * Z) and nsw(Y * Z)
// X * Z eq/ne Y * Z -> X eq/ne Y
if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
return new ICmpInst(Pred, X, Y);
} else
NonZero = isKnownNonZero(Z, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
NonZero = isKnownNonZero(Z, /*Depth=*/0, Q);

// If Z != 0 and nuw(X * Z) and nuw(Y * Z)
// X * Z u{lt/le/gt/ge}/eq/ne Y * Z -> X u{lt/le/gt/ge}/eq/ne Y