
Commit 671976f

[NFC][LoopVectorize] Add more simple early exit tests (#112529)
I realised we are missing tests covering more loops with multiple early exits, some countable and some uncountable. I've also added a few SVE versions of the tests in the AArch64 directory. Once we can vectorise such early exit loops, this gives us a sanity check that they also vectorise for SVE. Also, for some of the tests I expect the AArch64 versions to diverge from the same tests in the top-level directory once we start vectorising them.
1 parent e1f8f84 commit 671976f
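For context (this illustration is not part of the commit), the loops under test roughly correspond to source of the following shape. This is a hypothetical C sketch mirroring the first IR test below (same_exit_block_pre_inc_use1): the latch exit has a computable trip count, while the data-dependent break is an uncountable early exit, and both exits reach the same result block.

// Hypothetical C analogue of same_exit_block_pre_inc_use1 (illustration only).
#include <stdint.h>

uint64_t find_mismatch(const uint8_t *p1, const uint8_t *p2) {
  uint64_t i;
  for (i = 3; i != 67; i++) {   // countable exit: trip count is known
    if (p1[i] != p2[i])         // uncountable early exit: data dependent
      return i;                 // both exits feed the same return value
  }
  return 67;
}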

2 files changed: +518 -3 lines changed

Lines changed: 333 additions & 0 deletions
@@ -0,0 +1,333 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S < %s -p loop-vectorize | FileCheck %s --check-prefixes=CHECK

target triple = "aarch64-unknown-linux-gnu"

declare void @init_mem(ptr, i64);

define i64 @same_exit_block_pre_inc_use1() #1 {
; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[INDEX_NEXT1:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX2]]
; CHECK-NEXT: [[TMP38:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX2]]
; CHECK-NEXT: [[TMP39:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[TMP38]], [[TMP39]]
; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END:%.*]]
; CHECK: loop.inc:
; CHECK-NEXT: [[INDEX_NEXT1]] = add i64 [[INDEX2]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT1]], 67
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
; CHECK-NEXT: [[START_0_LCSSA:%.*]] = phi i64 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ]
; CHECK-NEXT: ret i64 [[START_0_LCSSA]]
;
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}


; In this example the early exit block appears in the list of ExitNotTaken
; SCEVs, but is not computable.
define i64 @same_exit_block_pre_inc_use4() {
; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use4() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i64], align 8
; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i64], align 8
; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]]
; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END:%.*]]
; CHECK: loop.inc:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP1]], label [[LOOP_END]]
; CHECK: loop.end:
; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP1]] ], [ 67, [[LOOP_INC]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
  %p1 = alloca [1024 x i64]
  %p2 = alloca [1024 x i64]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i64, ptr %p1, i64 %index
  %ld1 = load i64, ptr %arrayidx, align 1
  %cmp3 = icmp ult i64 %index, %ld1
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}


define i64 @loop_contains_safe_call() #1 {
; CHECK-LABEL: define i64 @loop_contains_safe_call(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[INDEX_NEXT1:%.*]], [[LOOP_INC1:%.*]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX2]]
; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]])
; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC1]], label [[LOOP_END:%.*]]
; CHECK: loop.inc:
; CHECK-NEXT: [[INDEX_NEXT1]] = add i64 [[INDEX2]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT1]], 67
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP1]], label [[LOOP_END]]
; CHECK: loop.end:
; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX2]], [[LOOP1]] ], [ 67, [[LOOP_INC1]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds float, ptr %p1, i64 %index
  %ld1 = load float, ptr %arrayidx, align 1
  %sqrt = tail call fast float @llvm.sqrt.f32(float %ld1)
  %cmp = fcmp fast ult float %sqrt, 3.0e+00
  br i1 %cmp, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}


define i64 @loop_contains_safe_div() #1 {
; CHECK-LABEL: define i64 @loop_contains_safe_div(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[LD1]], 20000
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[DIV]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END:%.*]]
; CHECK: loop.inc:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]]
; CHECK: loop.end:
; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %p1, i64 %index
  %ld1 = load i32, ptr %arrayidx, align 1
  %div = udiv i32 %ld1, 20000
  %cmp = icmp eq i32 %div, 1
  br i1 %cmp, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}


define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align(8) %p2) {
; DEBUG-LABEL: LV: Checking a loop in 'loop_contains_load_after_early_exit'
; DEBUG: LV: Found an early exit loop with symbolic max backedge taken count: 63
; DEBUG-NEXT: LV: We can vectorize this loop!
; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of loops with uncountable early exit is not yet supported.
; CHECK-LABEL: define i64 @loop_contains_load_after_early_exit(
; CHECK-SAME: ptr align 8 dereferenceable(1024) [[P2:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END:%.*]]
; CHECK: loop.inc:
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]]
; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP1]], label [[LOOP_END]]
; CHECK: loop.end:
; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP1]] ], [ [[LD2]], [[LOOP_INC]] ]
; CHECK-NEXT: ret i64 [[RETVAL]]
;
entry:
  %p1 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %p1, i64 %index
  %ld1 = load i32, ptr %arrayidx, align 1
  %cmp = icmp eq i32 %ld1, 1
  br i1 %cmp, label %loop.inc, label %loop.end

loop.inc:
  %arrayidx2 = getelementptr inbounds i64, ptr %p2, i64 %index
  %ld2 = load i64, ptr %arrayidx2, align 8
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ %ld2, %loop.inc ]
  ret i64 %retval
}


; The form of the induction variables requires SCEV predicates.
define i32 @diff_exit_block_needs_scev_check(i32 %end) {
; DEBUG-LABEL: LV: Checking a loop in 'diff_exit_block_needs_scev_check'
; DEBUG: Found an early exit loop with symbolic max backedge taken count: (-1 + (1 umax (zext i10 (trunc i32 %end to i10) to i32)))<nsw>
; DEBUG-NEXT: LV: We can vectorize this loop!
; DEBUG-NEXT: LV: Not vectorizing: Auto-vectorization of loops with uncountable early exit is not yet supported.
; CHECK-LABEL: define i32 @diff_exit_block_needs_scev_check(
; CHECK-SAME: i32 [[END:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i32], align 4
; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i32], align 4
; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT: [[END_CLAMPED:%.*]] = and i32 [[END]], 1023
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IND:%.*]] = phi i8 [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[GEP_IND:%.*]] = phi i64 [ [[GEP_IND_NEXT:%.*]], [[FOR_INC]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[GEP_IND]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[P2]], i64 [[GEP_IND]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[CMP_EARLY:%.*]] = icmp eq i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: br i1 [[CMP_EARLY]], label [[FOUND:%.*]], label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[IND_NEXT]] = add i8 [[IND]], 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IND_NEXT]] to i32
; CHECK-NEXT: [[GEP_IND_NEXT]] = add i64 [[GEP_IND]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[CONV]], [[END_CLAMPED]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY1]], label [[EXIT:%.*]]
; CHECK: found:
; CHECK-NEXT: ret i32 1
; CHECK: exit:
; CHECK-NEXT: ret i32 0
;
entry:
  %p1 = alloca [1024 x i32]
  %p2 = alloca [1024 x i32]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  %end.clamped = and i32 %end, 1023
  br label %for.body

for.body:
  %ind = phi i8 [ %ind.next, %for.inc ], [ 0, %entry ]
  %gep.ind = phi i64 [ %gep.ind.next, %for.inc ], [ 0, %entry ]
  %arrayidx1 = getelementptr inbounds i32, ptr %p1, i64 %gep.ind
  %0 = load i32, ptr %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr %p2, i64 %gep.ind
  %1 = load i32, ptr %arrayidx2, align 4
  %cmp.early = icmp eq i32 %0, %1
  br i1 %cmp.early, label %found, label %for.inc

for.inc:
  %ind.next = add i8 %ind, 1
  %conv = zext i8 %ind.next to i32
  %gep.ind.next = add i64 %gep.ind, 1
  %cmp = icmp ult i32 %conv, %end.clamped
  br i1 %cmp, label %for.body, label %exit

found:
  ret i32 1

exit:
  ret i32 0
}


declare i32 @foo(i32) readonly
declare <vscale x 4 x i32> @foo_vec(<vscale x 4 x i32>)

attributes #0 = { "vector-function-abi-variant"="_ZGVsNxv_foo(foo_vec)" }
attributes #1 = { "target-features"="+sve" vscale_range(1,16) }
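As a rough illustration of the last test above, diff_exit_block_needs_scev_check might correspond to C along these lines (a hypothetical sketch, not taken from the commit). The i8 counter is narrow enough to wrap, so the vectorizer presumably needs SCEV runtime predicates to relate it to the 64-bit index used for addressing, and the early exit branches to a different exit block than the latch.

// Hypothetical C analogue of diff_exit_block_needs_scev_check (illustration only).
#include <stdint.h>

int has_match(const int32_t *p1, const int32_t *p2, uint32_t end) {
  uint32_t end_clamped = end & 1023;
  uint8_t ind = 0;        // narrow induction variable, can wrap modulo 256
  uint64_t gep_ind = 0;   // wide induction variable used for addressing
  do {
    if (p1[gep_ind] == p2[gep_ind])
      return 1;           // early exit to a different exit block
    ind++;
    gep_ind++;
  } while ((uint32_t)ind < end_clamped);
  return 0;
}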
