Commit b9a1e58
[RISCVISel] Compute leading zeros for RISCVISD::VCPOP_VL node (#127705)
This patch adds handling of the RISCVISD::VCPOP_VL node in RISCVTargetLowering::computeKnownBitsForTargetNode. The population count of a mask can never exceed the vector length (operand 2 of the node), so every bit at or above the VL's maximum active bit count is known zero; recording that lets later combines eliminate redundant zero-extension instructions.
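To see the invariant the patch relies on: vcpop returns the number of set mask bits among the first VL elements, so the result is at most VL and needs no more active bits than VL itself. Below is a minimal standalone sketch of that bound, assuming an exactly-known VL (a hypothetical checker program, not LLVM code; std::countl_zero stands in for KnownBits::countMaxActiveBits):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t VL = 0; VL <= 1024; ++VL) {
    // Mirror of KnownBits::countMaxActiveBits() for an exactly-known VL:
    // the number of bits needed to represent VL.
    unsigned MaxActiveBits = 64 - std::countl_zero(VL);
    // Worst case for vcpop: every one of the first VL mask bits is set,
    // so the count equals VL itself.
    uint64_t MaxCount = VL;
    // Hence all bits from MaxActiveBits upward are zero -- exactly what
    // Known.Zero.setBitsFrom(Known2.countMaxActiveBits()) records.
    assert((MaxCount >> MaxActiveBits) == 0);
  }
  return 0;
}

In the actual patch the VL operand may be only partially known, which is why the code queries the known bits of operand 2 rather than a literal value.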
1 parent a2b4d4e · commit b9a1e58

File tree

3 files changed (+221, -0 lines)

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 5 additions & 0 deletions

@@ -19462,6 +19462,11 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     Known = Known.intersectWith(Known2);
     break;
   }
+  case RISCVISD::VCPOP_VL: {
+    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(2), Depth + 1);
+    Known.Zero.setBitsFrom(Known2.countMaxActiveBits());
+    break;
+  }
   case RISCVISD::CZERO_EQZ:
   case RISCVISD::CZERO_NEZ:
     Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
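For context, the facts recorded above are consumed generically: a DAG combine that wants to drop an extension or mask asks whether the affected bits are already zero. A simplified sketch of that query path follows, with a hypothetical helper name (this is not the actual combine code):

// Simplified illustration: how known bits let a combine prove an AND
// mask -- or the zero-extension it implements -- is redundant.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static bool maskIsRedundant(SelectionDAG &DAG, SDValue X,
                            const APInt &KeepMask) {
  // MaskedValueIsZero returns true when (X & Mask) is provably zero; it
  // queries computeKnownBits, which defers target-specific opcodes to
  // computeKnownBitsForTargetNode -- now including RISCVISD::VCPOP_VL.
  return DAG.MaskedValueIsZero(X, ~KeepMask);
}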
Lines changed: 18 additions & 0 deletions

@@ -0,0 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s --check-prefixes=CHECK,RV64

define i32 @test(<8 x i1> %mask) {
; CHECK-LABEL: test:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
  %1 = bitcast <8 x i1> %mask to i8
  %2 = call range(i8 0, 9) i8 @llvm.ctpop.i8(i8 %1)
  %3 = zext nneg i8 %2 to i32
  ret i32 %3
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}
Lines changed: 198 additions & 0 deletions

@@ -0,0 +1,198 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s --check-prefixes=CHECK,RV64

define dso_local void @test_store1(ptr nocapture noundef writeonly %dst, ptr nocapture noundef readonly %src, i32 noundef signext %c, i32 noundef signext %n) {
; RV32-LABEL: test_store1:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    blez a3, .LBB0_6
; RV32-NEXT:  # %bb.1: # %for.body.preheader
; RV32-NEXT:    li a4, 8
; RV32-NEXT:    bltu a3, a4, .LBB0_7
; RV32-NEXT:  # %bb.2: # %for.body.preheader
; RV32-NEXT:    sub a4, a0, a1
; RV32-NEXT:    sltu a5, a0, a1
; RV32-NEXT:    neg a5, a5
; RV32-NEXT:    sltiu a4, a4, 32
; RV32-NEXT:    seqz a5, a5
; RV32-NEXT:    and a4, a5, a4
; RV32-NEXT:    bnez a4, .LBB0_7
; RV32-NEXT:  # %bb.3: # %vector.ph
; RV32-NEXT:    lui a5, 524288
; RV32-NEXT:    addi a5, a5, -8
; RV32-NEXT:    and a5, a3, a5
; RV32-NEXT:    li a7, 0
; RV32-NEXT:    li a6, 0
; RV32-NEXT:  .LBB0_4: # %vector.body
; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
; RV32-NEXT:    slli t0, a7, 2
; RV32-NEXT:    addi t1, a7, 8
; RV32-NEXT:    add t0, a1, t0
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vle32.v v8, (t0)
; RV32-NEXT:    sltu a7, t1, a7
; RV32-NEXT:    xor t0, t1, a5
; RV32-NEXT:    add a6, a6, a7
; RV32-NEXT:    vmslt.vx v10, v8, a2
; RV32-NEXT:    vcompress.vm v12, v8, v10
; RV32-NEXT:    vcpop.m a7, v10
; RV32-NEXT:    vsetvli zero, a7, e32, m2, ta, ma
; RV32-NEXT:    vse32.v v12, (a0)
; RV32-NEXT:    slli a7, a7, 2
; RV32-NEXT:    or t0, t0, a6
; RV32-NEXT:    add a0, a0, a7
; RV32-NEXT:    mv a7, t1
; RV32-NEXT:    bnez t0, .LBB0_4
; RV32-NEXT:  # %bb.5: # %middle.block
; RV32-NEXT:    bne a5, a3, .LBB0_9
; RV32-NEXT:  .LBB0_6: # %for.cond.cleanup
; RV32-NEXT:    ret
; RV32-NEXT:  .LBB0_7:
; RV32-NEXT:    li a5, 0
; RV32-NEXT:    li a4, 0
; RV32-NEXT:    j .LBB0_9
; RV32-NEXT:  .LBB0_8: # %for.inc
; RV32-NEXT:    # in Loop: Header=BB0_9 Depth=1
; RV32-NEXT:    addi a5, a5, 1
; RV32-NEXT:    seqz a6, a5
; RV32-NEXT:    add a4, a4, a6
; RV32-NEXT:    xor a6, a5, a3
; RV32-NEXT:    or a6, a6, a4
; RV32-NEXT:    beqz a6, .LBB0_6
; RV32-NEXT:  .LBB0_9: # %for.body
; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
; RV32-NEXT:    slli a6, a5, 2
; RV32-NEXT:    add a6, a1, a6
; RV32-NEXT:    lw a6, 0(a6)
; RV32-NEXT:    bge a6, a2, .LBB0_8
; RV32-NEXT:  # %bb.10: # %if.then
; RV32-NEXT:    # in Loop: Header=BB0_9 Depth=1
; RV32-NEXT:    addi a7, a0, 4
; RV32-NEXT:    sw a6, 0(a0)
; RV32-NEXT:    mv a0, a7
; RV32-NEXT:    j .LBB0_8
;
; RV64-LABEL: test_store1:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    blez a3, .LBB0_6
; RV64-NEXT:  # %bb.1: # %for.body.preheader
; RV64-NEXT:    li a5, 8
; RV64-NEXT:    li a4, 0
; RV64-NEXT:    bltu a3, a5, .LBB0_7
; RV64-NEXT:  # %bb.2: # %for.body.preheader
; RV64-NEXT:    sub a5, a0, a1
; RV64-NEXT:    li a6, 31
; RV64-NEXT:    bgeu a6, a5, .LBB0_7
; RV64-NEXT:  # %bb.3: # %vector.ph
; RV64-NEXT:    lui a4, 524288
; RV64-NEXT:    addiw a4, a4, -8
; RV64-NEXT:    and a4, a3, a4
; RV64-NEXT:    slli a5, a4, 2
; RV64-NEXT:    add a5, a5, a1
; RV64-NEXT:    mv a6, a1
; RV64-NEXT:  .LBB0_4: # %vector.body
; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-NEXT:    vle32.v v8, (a6)
; RV64-NEXT:    addi a6, a6, 32
; RV64-NEXT:    vmslt.vx v10, v8, a2
; RV64-NEXT:    vcompress.vm v12, v8, v10
; RV64-NEXT:    vcpop.m a7, v10
; RV64-NEXT:    vsetvli zero, a7, e32, m2, ta, ma
; RV64-NEXT:    vse32.v v12, (a0)
; RV64-NEXT:    slli a7, a7, 2
; RV64-NEXT:    add a0, a0, a7
; RV64-NEXT:    bne a6, a5, .LBB0_4
; RV64-NEXT:  # %bb.5: # %middle.block
; RV64-NEXT:    bne a4, a3, .LBB0_7
; RV64-NEXT:  .LBB0_6: # %for.cond.cleanup
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB0_7: # %for.body.preheader13
; RV64-NEXT:    slli a4, a4, 2
; RV64-NEXT:    slli a5, a3, 2
; RV64-NEXT:    add a3, a1, a4
; RV64-NEXT:    add a1, a1, a5
; RV64-NEXT:    j .LBB0_9
; RV64-NEXT:  .LBB0_8: # %for.inc
; RV64-NEXT:    # in Loop: Header=BB0_9 Depth=1
; RV64-NEXT:    addi a3, a3, 4
; RV64-NEXT:    beq a3, a1, .LBB0_6
; RV64-NEXT:  .LBB0_9: # %for.body
; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
; RV64-NEXT:    lw a4, 0(a3)
; RV64-NEXT:    bge a4, a2, .LBB0_8
; RV64-NEXT:  # %bb.10: # %if.then
; RV64-NEXT:    # in Loop: Header=BB0_9 Depth=1
; RV64-NEXT:    addi a5, a0, 4
; RV64-NEXT:    sw a4, 0(a0)
; RV64-NEXT:    mv a0, a5
; RV64-NEXT:    j .LBB0_8
entry:
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %dst11 = ptrtoint ptr %dst to i64
  %src12 = ptrtoint ptr %src to i64
  %wide.trip.count = zext nneg i32 %n to i64
  %min.iters.check = icmp ult i32 %n, 8
  %0 = sub i64 %dst11, %src12
  %diff.check = icmp ult i64 %0, 32
  %or.cond = or i1 %min.iters.check, %diff.check
  br i1 %or.cond, label %for.body.preheader13, label %vector.ph

for.body.preheader13:                             ; preds = %middle.block, %for.body.preheader
  %indvars.iv.ph = phi i64 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
  %dst.addr.09.ph = phi ptr [ %dst, %for.body.preheader ], [ %monotonic.add, %middle.block ]
  br label %for.body

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i64 %wide.trip.count, 2147483640
  %broadcast.splatinsert = insertelement <8 x i32> poison, i32 %c, i64 0
  %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %monotonic.iv = phi ptr [ %dst, %vector.ph ], [ %monotonic.add, %vector.body ]
  %1 = getelementptr inbounds i32, ptr %src, i64 %index
  %wide.load = load <8 x i32>, ptr %1, align 4
  %2 = icmp slt <8 x i32> %wide.load, %broadcast.splat
  tail call void @llvm.masked.compressstore.v8i32(<8 x i32> %wide.load, ptr align 4 %monotonic.iv, <8 x i1> %2)
  %3 = bitcast <8 x i1> %2 to i8
  %4 = tail call range(i8 0, 9) i8 @llvm.ctpop.i8(i8 %3)
  %5 = shl nuw nsw i8 %4, 2
  %6 = zext nneg i8 %5 to i64
  %monotonic.add = getelementptr inbounds i8, ptr %monotonic.iv, i64 %6
  %index.next = add nuw i64 %index, 8
  %7 = icmp eq i64 %index.next, %n.vec
  br i1 %7, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader13

for.cond.cleanup:                                 ; preds = %for.inc, %middle.block, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader13, %for.inc
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ %indvars.iv.ph, %for.body.preheader13 ]
  %dst.addr.09 = phi ptr [ %dst.addr.1, %for.inc ], [ %dst.addr.09.ph, %for.body.preheader13 ]
  %arrayidx = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
  %8 = load i32, ptr %arrayidx, align 4
  %cmp1 = icmp slt i32 %8, %c
  br i1 %cmp1, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %incdec.ptr = getelementptr inbounds i8, ptr %dst.addr.09, i64 4
  store i32 %8, ptr %dst.addr.09, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %dst.addr.1 = phi ptr [ %incdec.ptr, %if.then ], [ %dst.addr.09, %for.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
