[InstSimplify][InstCombine] Remove unnecessary m_c_* matchers. #81712
Conversation
@llvm/pr-subscribers-llvm-ir @llvm/pr-subscribers-llvm-transforms

Author: Yingwei Zheng (dtcxzyw)

Changes

This patch removes unnecessary m_c_* matchers since we always canonicalize commutative_op Cst, X into commutative_op X, Cst.

Compile-time impact: https://llvm-compile-time-tracker.com/compare.php?from=bfc0b7c6891896ee8e9818f22800472510093864&to=d27b058bb9acaa43d3cadbf3cd889e8f79e5c634&stat=instructions:u

Full diff: https://github.com/llvm/llvm-project/pull/81712.diff

6 Files Affected:
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 1aa324c6b5f380..055f121e743411 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -76,7 +76,7 @@ static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
// Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
// with a new integer type of the corresponding bit width.
- if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
+ if (match(J, m_And(m_Instruction(I), m_APInt(M)))) {
int32_t Bits = (*M + 1).exactLogBase2();
if (Bits > 0) {
RT = IntegerType::get(Phi->getContext(), Bits);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 51e258d69e9e2e..2d72a032a6c6d5 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -3246,8 +3246,8 @@ static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
Value *X;
const APInt *C1, *C2;
- if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
- !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
+ if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
+ !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
return false;
return (C1->slt(*C2) && C1->isNonNegative()) ||
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index 7a3b708e740067..5050091836b7f9 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -623,7 +623,7 @@ bool VPIntrinsic::canIgnoreVectorLengthParam() const {
if (EC.isScalable()) {
// Compare vscale patterns
uint64_t VScaleFactor;
- if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale())))
+ if (match(VLParam, m_Mul(m_VScale(), m_ConstantInt(VScaleFactor))))
return VScaleFactor >= EC.getKnownMinValue();
return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale());
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 4465eb8992fbbf..0af9a2786f6901 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4450,7 +4450,7 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
}
// ~(X + C) --> ~C - X
- if (match(NotVal, m_c_Add(m_Value(X), m_ImmConstant(C))))
+ if (match(NotVal, m_Add(m_Value(X), m_ImmConstant(C))))
return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X);
// ~(X - Y) --> ~X + Y
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index 62e49469cb0198..f73679f9461bad 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -258,9 +258,9 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
case Instruction::And: {
Constant *ShAmt;
// sub(y,and(lshr(x,C),1)) --> add(ashr(shl(x,(BW-1)-C),BW-1),y)
- if (match(I, m_c_And(m_OneUse(m_TruncOrSelf(
- m_LShr(m_Value(X), m_ImmConstant(ShAmt)))),
- m_One()))) {
+ if (match(I, m_And(m_OneUse(m_TruncOrSelf(
+ m_LShr(m_Value(X), m_ImmConstant(ShAmt)))),
+ m_One()))) {
unsigned BW = X->getType()->getScalarSizeInBits();
Constant *BWMinusOne = ConstantInt::get(X->getType(), BW - 1);
Value *R = Builder.CreateShl(X, Builder.CreateSub(BWMinusOne, ShAmt));
diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
index ac2ebf52ed6296..1e90f0edbd8003 100644
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -2453,36 +2453,6 @@ define i1 @icmp_nsw_2(i32 %V) {
ret i1 %cmp
}
-define i1 @icmp_nsw_commute(i32 %V) {
-; CHECK-LABEL: @icmp_nsw_commute(
-; CHECK-NEXT: ret i1 true
-;
- %add5 = add i32 5, %V
- %add6 = add nsw i32 %V, 6
- %cmp = icmp slt i32 %add5, %add6
- ret i1 %cmp
-}
-
-define i1 @icmp_nsw_commute2(i32 %V) {
-; CHECK-LABEL: @icmp_nsw_commute2(
-; CHECK-NEXT: ret i1 true
-;
- %add5 = add i32 %V, 5
- %add6 = add nsw i32 6, %V
- %cmp = icmp slt i32 %add5, %add6
- ret i1 %cmp
-}
-
-define i1 @icmp_nsw_commute3(i32 %V) {
-; CHECK-LABEL: @icmp_nsw_commute3(
-; CHECK-NEXT: ret i1 true
-;
- %add5 = add i32 5, %V
- %add6 = add nsw i32 6, %V
- %cmp = icmp slt i32 %add5, %add6
- ret i1 %cmp
-}
-
define i1 @icmp_nsw_22(i32 %V) {
; CHECK-LABEL: @icmp_nsw_22(
; CHECK-NEXT: ret i1 true
LGTM
(Shouldn't mark this as NFC though, as it does change optimization behavior for InstSimplify.) |
The pull request title was changed from "[InstSimplify][InstCombine] Remove unnecessary m_c_* matchers. NFC." to "[InstSimplify][InstCombine] Remove unnecessary m_c_* matchers."
@@ -3246,8 +3246,8 @@ static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,

   Value *X;
   const APInt *C1, *C2;
-  if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
-      !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
+  if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
I'd expect the InstSimplify uses to stay as they are?
We only handle non-canonical patterns in InstSimplify if there is phase-ordering justification to do so, which is absent here. I did check the original review to confirm that this was done simply "because we can" and not for a specific reason.
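To make the canonicalization argument concrete, below is a minimal standalone sketch (not part of this patch; it assumes a host program linked against LLVM's Core, Passes, and InstCombine libraries) that builds the non-canonical form add i32 5, %x and runs a single InstCombine pass over it. InstCombine moves the constant to the right-hand side, so matchers written for the canonical operand order are enough once IR has been through InstCombine; IR that never reaches canonicalization is exactly the situation the removed InstSimplify tests exercised.

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> B(Ctx);

  // Build: define i32 @f(i32 %x) with "%add = add i32 5, %x" (constant on the LHS).
  auto *FTy = FunctionType::get(B.getInt32Ty(), {B.getInt32Ty()}, false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  F->getArg(0)->setName("x");
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));
  B.CreateRet(B.CreateAdd(B.getInt32(5), F->getArg(0), "add"));

  // Standard new-pass-manager setup for running a single function pass.
  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  FunctionPassManager FPM;
  FPM.addPass(InstCombinePass());
  FPM.run(*F, FAM);

  // After InstCombine the constant sits on the RHS ("%add = add i32 %x, 5"),
  // which the plain m_Add(m_Value(X), m_APInt(C)) pattern already matches.
  F->print(outs());
  return 0;
}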
This patch removes unnecessary m_c_* matchers since we always canonicalize commutative_op Cst, X into commutative_op X, Cst.

Compile-time impact: https://llvm-compile-time-tracker.com/compare.php?from=bfc0b7c6891896ee8e9818f22800472510093864&to=d27b058bb9acaa43d3cadbf3cd889e8f79e5c634&stat=instructions:u
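For readers less familiar with the PatternMatch utilities involved, here is a small illustrative sketch (not from the patch; it assumes a host program linked against LLVM's Core library). With the constant pattern on the right, m_Add only accepts the canonical add X, C operand order, while m_c_Add also tries the swapped add C, X order; on already-canonicalized IR the extra attempt never fires, which is the redundancy the patch removes.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> B(Ctx);

  auto *FTy = FunctionType::get(B.getInt32Ty(), {B.getInt32Ty()}, false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));
  Value *X = F->getArg(0);
  X->setName("x");

  Value *Canonical = B.CreateAdd(X, B.getInt32(5)); // add i32 %x, 5
  Value *Swapped = B.CreateAdd(B.getInt32(5), X);   // add i32 5, %x

  Value *Op;
  const APInt *C;
  // m_Add with m_APInt on the right only accepts the canonical order.
  outs() << "m_Add on canonical: "
         << (match(Canonical, m_Add(m_Value(Op), m_APInt(C))) ? "yes" : "no")
         << "\n";
  outs() << "m_Add on swapped:   "
         << (match(Swapped, m_Add(m_Value(Op), m_APInt(C))) ? "yes" : "no")
         << "\n";
  // m_c_Add retries with the operands commuted, so it also matches the
  // swapped form -- redundant work when the IR is already canonical.
  outs() << "m_c_Add on swapped: "
         << (match(Swapped, m_c_Add(m_Value(Op), m_APInt(C))) ? "yes" : "no")
         << "\n";
  return 0;
}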