
Commit 4b581e8

[LV] Add tests where rt checks may make vectorization unprofitable.
Add a few additional tests which require a large number of runtime checks for D109368.
1 parent: 9d0eb44

2 files changed: 107 additions (+) and 2 deletions (−)

llvm/test/Transforms/LoopVectorize/AArch64/runtime-check-size-based-threshold.ll

Lines changed: 54 additions & 2 deletions
@@ -1,4 +1,4 @@
-; RUN: opt -loop-vectorize -mtriple=arm64-apple-iphoneos -S %s | FileCheck %s
+; RUN: opt -loop-vectorize -mtriple=arm64-apple-iphoneos -vectorizer-min-trip-count=8 -S %s | FileCheck %s
 
 ; Tests for loops with large numbers of runtime checks. Check that loops are
 ; vectorized, if the loop trip counts are large and the impact of the runtime
@@ -50,7 +50,7 @@ loop: ; preds = %bb54, %bb37
   %gep.4 = getelementptr inbounds i16, i16* %ptr.4, i64 %iv
   store i16 %trunc.2, i16* %gep.4, align 2
   %iv.next = add nuw nsw i64 %iv, 1
-  %cmp = icmp ult i64 %iv, 50
+  %cmp = icmp ult i64 %iv, 10
   br i1 %cmp, label %loop, label %exit
 
 exit:
@@ -109,3 +109,55 @@ loop: ; preds = %bb54, %bb37
 exit:
   ret void
 }
+
+define void @test_tc_unknown(i16* %ptr.1, i16* %ptr.2, i16* %ptr.3, i16* %ptr.4, i64 %off.1, i64 %off.2, i64 %N) {
+; CHECK-LABEL: define void @test_tc_unknown
+; CHECK-NOT: vector.memcheck
+; CHECK-NOT: vector.body
+;
+entry:
+  br label %loop
+
+loop: ; preds = %bb54, %bb37
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %gep.1 = getelementptr inbounds i16, i16* %ptr.1, i64 %iv
+  %lv.1 = load i16, i16* %gep.1, align 2
+  %ext.1 = sext i16 %lv.1 to i32
+  %gep.2 = getelementptr inbounds i16, i16* %ptr.2, i64 %iv
+  %lv.2 = load i16, i16* %gep.2, align 2
+  %ext.2 = sext i16 %lv.2 to i32
+  %gep.off.1 = getelementptr inbounds i16, i16* %gep.2, i64 %off.1
+  %lv.3 = load i16, i16* %gep.off.1, align 2
+  %ext.3 = sext i16 %lv.3 to i32
+  %gep.off.2 = getelementptr inbounds i16, i16* %gep.2, i64 %off.2
+  %lv.4 = load i16, i16* %gep.off.2, align 2
+  %ext.4 = sext i16 %lv.4 to i32
+  %tmp62 = mul nsw i32 %ext.2, 11
+  %tmp66 = mul nsw i32 %ext.3, -4
+  %tmp70 = add nsw i32 %tmp62, 4
+  %tmp71 = add nsw i32 %tmp70, %tmp66
+  %tmp72 = add nsw i32 %tmp71, %ext.4
+  %tmp73 = lshr i32 %tmp72, 3
+  %tmp74 = add nsw i32 %tmp73, %ext.1
+  %tmp75 = lshr i32 %tmp74, 1
+  %tmp76 = mul nsw i32 %ext.2, 5
+  %tmp77 = shl nsw i32 %ext.3, 2
+  %tmp78 = add nsw i32 %tmp76, 4
+  %tmp79 = add nsw i32 %tmp78, %tmp77
+  %tmp80 = sub nsw i32 %tmp79, %ext.4
+  %tmp81 = lshr i32 %tmp80, 3
+  %tmp82 = sub nsw i32 %tmp81, %ext.1
+  %tmp83 = lshr i32 %tmp82, 1
+  %trunc.1 = trunc i32 %tmp75 to i16
+  %gep.3 = getelementptr inbounds i16, i16* %ptr.3, i64 %iv
+  store i16 %trunc.1, i16* %gep.3, align 2
+  %trunc.2 = trunc i32 %tmp83 to i16
+  %gep.4 = getelementptr inbounds i16, i16* %ptr.4, i64 %iv
+  store i16 %trunc.2, i16* %gep.4, align 2
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cmp = icmp ult i64 %iv, %N
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  ret void
+}
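
For intuition, the @test_tc_unknown function added above corresponds roughly to a C loop like the sketch below. This is a hypothetical reconstruction from the IR, not the test's original source; the variable names are invented and signed/unsigned shift details are glossed over. What matters is the access pattern: a load from %ptr.1, three loads from addresses derived from %ptr.2 (two of them at runtime offsets), and stores through %ptr.3 and %ptr.4 force the vectorizer to emit a sizable set of pairwise overlap checks, while the trip count %N is unknown at compile time, so there is no guarantee those checks pay off.

/* Hypothetical C equivalent of @test_tc_unknown, reconstructed from the IR
 * above for illustration only (not the original source of the test). */
void test_tc_unknown(short *p1, short *p2, short *p3, short *p4,
                     long off1, long off2, long n) {
  for (long i = 0; i <= n; i++) {   /* trip count not known at compile time */
    int a = p1[i];                  /* load via %gep.1 */
    int b = p2[i];                  /* load via %gep.2 */
    int c = p2[i + off1];           /* load via %gep.off.1 */
    int d = p2[i + off2];           /* load via %gep.off.2 */
    /* two filter-like expressions, each ending in a store to its own array */
    p3[i] = (short)((((unsigned)(b * 11 + 4 - 4 * c + d) >> 3) + a) >> 1);
    p4[i] = (short)((((unsigned)(b * 5 + 4 + 4 * c - d) >> 3) - a) >> 1);
  }
}

The CHECK-NOT lines above assert that no vector.memcheck or vector.body blocks are emitted for this function.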
Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
+; REQUIRES: asserts
+
+; RUN: opt -runtime-memory-check-threshold=9 -passes='loop-vectorize' -mtriple=x86_64-unknown-linux -S -debug %s 2>&1 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+target triple = "x86_64-unknown-linux"
+
+declare double @llvm.pow.f64(double, double)
+
+; Test case where the memory runtime checks and vector body is more expensive
+; than running the scalar loop.
+; TODO: should not be vectorized.
+define void @test(double* nocapture %A, double* nocapture %B, double* nocapture %C, double* nocapture %D, double* nocapture %E) {
+; CHECK-LABEL: @test(
+; CHECK: vector.memcheck
+; CHECK: vector.body
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %gep.A = getelementptr inbounds double, double* %A, i64 %iv
+  %l.A = load double, double* %gep.A, align 4
+  store double 0.0, double* %gep.A, align 4
+  %p.1 = call double @llvm.pow.f64(double %l.A, double 2.0)
+
+  %gep.B = getelementptr inbounds double, double* %B, i64 %iv
+  %l.B = load double, double* %gep.B, align 4
+  %p.2 = call double @llvm.pow.f64(double %l.B, double %p.1)
+  store double 0.0, double* %gep.B, align 4
+
+  %gep.C = getelementptr inbounds double, double* %C, i64 %iv
+  %l.C = load double, double* %gep.C, align 4
+  %p.3 = call double @llvm.pow.f64(double %p.1, double %l.C)
+
+  %gep.D = getelementptr inbounds double, double* %D, i64 %iv
+  %l.D = load double, double* %gep.D
+  %p.4 = call double @llvm.pow.f64(double %p.3, double %l.D)
+  %p.5 = call double @llvm.pow.f64(double %p.4, double %p.3)
+  %mul = fmul double 2.0, %p.5
+  %mul.2 = fmul double %mul, 2.0
+  %mul.3 = fmul double %mul, %mul.2
+  %gep.E = getelementptr inbounds double, double* %E, i64 %iv
+  store double %mul.3, double* %gep.E, align 4
+  %iv.next = add i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.next, 16
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
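
Similarly, the @test function in this new file corresponds roughly to the C sketch below (again a hypothetical reconstruction from the IR, not the original source). Five distinct double pointers are accessed in every iteration and three of them (%A, %B, %E) are written, so the vectorizer must guard the vector loop with a set of pairwise no-overlap checks, while the loop runs only 16 iterations and its body is dominated by llvm.pow.f64 calls.

/* Hypothetical C-level equivalent of @test, reconstructed from the IR above
 * for illustration only (not the original source of the test). */
#include <math.h>

void test(double *A, double *B, double *C, double *D, double *E) {
  for (long i = 0; i < 16; i++) {   /* fixed, small trip count */
    double a = A[i];
    A[i] = 0.0;
    double p1 = pow(a, 2.0);

    double b = B[i];
    double p2 = pow(b, p1);         /* %p.2 is unused, as in the IR */
    (void)p2;
    B[i] = 0.0;

    double p3 = pow(p1, C[i]);
    double p4 = pow(p3, D[i]);
    double p5 = pow(p4, p3);

    double m = 2.0 * p5;            /* %mul */
    E[i] = m * (m * 2.0);           /* %mul.3 = %mul * %mul.2 */
  }
}

The RUN line limits the number of runtime memory checks the vectorizer may emit via -runtime-memory-check-threshold=9, and the CHECK lines document the current behaviour the TODO wants changed: a vector.memcheck block and a vector.body are still generated.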
