
Commit 79ce933

[InstCombine] Extend (lshr/shl (shl/lshr -1, x), x) -> (lshr/shl -1, x) for multi-use
We previously did this only when the inner `(shl/lshr -1, x)` was one-use. No instructions are added even when the inner `(shl/lshr -1, x)` has multiple uses, and this canonicalization both makes the resulting instruction easier to analyze and shortens its dependency chain. Closes #81576
1 parent: 2422e96

7 files changed: +61 -49 lines
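
The fold relies on a simple bit identity: for any in-range shift amount y, `(-1 >> y) << y` equals `-1 << y`, and symmetrically `(-1 << y) >> y` equals `-1 >> y`. As a quick sanity check of that identity (not part of the commit), here is a small standalone C++ program over an 8-bit value; the width and variable names are illustrative only:

#include <cassert>

int main() {
  const unsigned AllOnes = 0xFFu; // the i8 -1 from the tests, held as an unsigned byte
  for (unsigned y = 0; y < 8; ++y) {
    // (-1 >> y) << y == -1 << y: both clear exactly the low y bits.
    assert((((AllOnes >> y) << y) & 0xFFu) == ((AllOnes << y) & 0xFFu));
    // (-1 << y) >> y == -1 >> y: both clear exactly the high y bits.
    assert((((AllOnes << y) & 0xFFu) >> y) == (AllOnes >> y));
  }
  return 0;
}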

llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp

Lines changed: 12 additions & 0 deletions
@@ -1206,6 +1206,12 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
     return BinaryOperator::CreateAnd(Mask, X);
   }
 
+  // Transform (-1 >> y) << y to -1 << y
+  if (match(Op0, m_LShr(m_AllOnes(), m_Specific(Op1)))) {
+    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
+    return BinaryOperator::CreateShl(AllOnes, Op1);
+  }
+
   Constant *C1;
   if (match(Op1, m_Constant(C1))) {
     Constant *C2;
@@ -1493,6 +1499,12 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
     return BinaryOperator::CreateAnd(Mask, X);
   }
 
+  // Transform (-1 << y) >> y to -1 >> y
+  if (match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1)))) {
+    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
+    return BinaryOperator::CreateLShr(AllOnes, Op1);
+  }
+
   if (Instruction *Overflow = foldLShrOverflowBit(I))
     return Overflow;
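
A side effect visible in the test updates below: the old form `lshr exact iN %inner, %y` (with `%inner = shl iN -1, %y`) could legally carry the `exact` flag because every bit it shifts out is known zero, whereas the canonical `lshr iN -1, %y` shifts out set bits, so the flag disappears from the updated CHECK lines. A minimal C++ sketch of that reasoning, again assuming an 8-bit width purely for illustration:

#include <cassert>

int main() {
  for (unsigned y = 1; y < 8; ++y) {
    const unsigned LowMask = (1u << y) - 1; // the y bits an lshr by y discards
    // Old operand (-1 << y): the discarded low bits are all zero, so
    // `lshr exact` was a valid annotation on the old form.
    assert((((0xFFu << y) & 0xFFu) & LowMask) == 0);
    // New operand -1: the discarded low bits are ones, so `exact` would be
    // wrong on the canonical form and the new fold emits a plain lshr.
    assert((0xFFu & LowMask) != 0);
  }
  return 0;
}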

llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll

Lines changed: 10 additions & 10 deletions
@@ -22,7 +22,7 @@ define i1 @p0(i8 %x, i8 %y) {
 ; CHECK-LABEL: @p0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -42,7 +42,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @p1_vec(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <2 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret <2 x i1> [[RET]]
 ;
@@ -58,7 +58,7 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) {
 ; CHECK-LABEL: @p2_vec_undef0(
 ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret <3 x i1> [[RET]]
 ;
@@ -80,7 +80,7 @@ define i1 @c0(i8 %y) {
 ; CHECK-LABEL: @c0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -98,7 +98,7 @@ define i1 @c1(i8 %y) {
 ; CHECK-LABEL: @c1(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -116,7 +116,7 @@ define i1 @c2(i8 %y) {
 ; CHECK-LABEL: @c2(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -138,7 +138,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
 ; CHECK-LABEL: @oneuse0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -156,7 +156,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
 ; CHECK-LABEL: @oneuse1(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
 ; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
@@ -175,7 +175,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
 ; CHECK-LABEL: @oneuse2(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
@@ -200,7 +200,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
 ; CHECK-LABEL: @n0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[NOTX:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]

llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll

Lines changed: 10 additions & 10 deletions
@@ -22,7 +22,7 @@ define i1 @p0(i8 %x, i8 %y) {
 ; CHECK-LABEL: @p0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -42,7 +42,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @p1_vec(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <2 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret <2 x i1> [[RET]]
 ;
@@ -58,7 +58,7 @@ define <3 x i1> @p2_vec_undef0(<3 x i8> %x, <3 x i8> %y) {
 ; CHECK-LABEL: @p2_vec_undef0(
 ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <3 x i8> [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret <3 x i1> [[RET]]
 ;
@@ -80,7 +80,7 @@ define i1 @c0(i8 %y) {
 ; CHECK-LABEL: @c0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -98,7 +98,7 @@ define i1 @c1(i8 %y) {
 ; CHECK-LABEL: @c1(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -116,7 +116,7 @@ define i1 @c2(i8 %y) {
 ; CHECK-LABEL: @c2(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -138,7 +138,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
 ; CHECK-LABEL: @oneuse0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]
@@ -156,7 +156,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
 ; CHECK-LABEL: @oneuse1(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
 ; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
@@ -175,7 +175,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
 ; CHECK-LABEL: @oneuse2(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
@@ -200,7 +200,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
 ; CHECK-LABEL: @n0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[NOTX:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]

llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-d.ll

Lines changed: 7 additions & 7 deletions
@@ -18,7 +18,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
 ; CHECK-NEXT: call void @use64(i64 [[T0]])
 ; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -54,7 +54,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -85,7 +85,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_splat_undef(
 ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
 ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 undef, i32 -33>
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -116,7 +116,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t3_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
 ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 undef, i32 65>
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -149,7 +149,7 @@ define i32 @n4_extrause0(i64 %x, i32 %nbits) {
 ; CHECK-LABEL: @n4_extrause0(
 ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
 ; CHECK-NEXT: call void @use64(i64 [[T0]])
 ; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -182,7 +182,7 @@ define i32 @n5_extrause1(i64 %x, i32 %nbits) {
 ; CHECK-LABEL: @n5_extrause1(
 ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
 ; CHECK-NEXT: call void @use64(i64 [[T0]])
 ; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -215,7 +215,7 @@ define i32 @n6_extrause2(i64 %x, i32 %nbits) {
 ; CHECK-LABEL: @n6_extrause2(
 ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -33
 ; CHECK-NEXT: call void @use64(i64 [[T0]])
 ; CHECK-NEXT: call void @use64(i64 [[T1]])

llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll

Lines changed: 5 additions & 5 deletions
@@ -16,7 +16,7 @@ declare void @use32(i32)
 define i32 @t0_basic(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT: call void @use32(i32 [[T0]])
 ; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -43,7 +43,7 @@ declare void @use8xi32(<8 x i32>)
 define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_splat(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
@@ -66,7 +66,7 @@ define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_splat_undef(
 ; CHECK-NEXT: [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 undef, i32 -1>
 ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
@@ -89,7 +89,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i32> %x, <8 x i32> %nbits) {
 define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact <8 x i32> [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 33>
 ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T1]])
@@ -114,7 +114,7 @@ define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 define i32 @n3_extrause(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @n3_extrause(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT: [[T1:%.*]] = lshr exact i32 [[T0]], [[NBITS]]
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS]]
 ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT: call void @use32(i32 [[T0]])

llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-d.ll

Lines changed: 5 additions & 5 deletions
@@ -18,7 +18,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -32
 ; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
 ; CHECK-NEXT: call void @use64(i64 [[T0]])
@@ -56,7 +56,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
 ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
@@ -89,7 +89,7 @@ define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_splat_undef(
 ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
 ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
 ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
@@ -122,7 +122,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t3_vec_nonsplat(
 ; CHECK-NEXT: [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
 ; CHECK-NEXT: [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact <8 x i64> [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
 ; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
@@ -157,7 +157,7 @@ define i32 @n4_extrause(i64 %x, i32 %nbits) {
 ; CHECK-LABEL: @n4_extrause(
 ; CHECK-NEXT: [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
-; CHECK-NEXT: [[T2:%.*]] = lshr exact i64 [[T1]], [[T0]]
+; CHECK-NEXT: [[T2:%.*]] = lshr i64 -1, [[T0]]
 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[NBITS]], -32
 ; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
 ; CHECK-NEXT: call void @use64(i64 [[T0]])
