
Commit 7a49d80

[VPlan] Skip users outside loop in check for exit pre-compute candidates
When collecting candidates to pre-compute the cost of operands of exit conditions, skip users outside the loop when checking whether they are in ExitInstrs. Users outside the loop should be ignored, as they won't make a value live in the VPlan. This fixes a failure when building for X86 with sanitizers on macOS after b841e2e (https://green.lab.llvm.org/job/llvm.org/job/clang-stage2-cmake-RgSan/287/).
1 parent 6598795 · commit 7a49d80
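For readability, here is a minimal sketch of the updated check as a hypothetical standalone helper. The helper's name, signature, and the SmallPtrSetImpl container are illustrative choices for the sketch; the LLVM APIs used (none_of, Loop::contains, SmallPtrSetImpl::contains) are real, and the predicate mirrors the lambda in the diff below.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// An operand of an exit condition stays a pre-compute candidate only if every
// user *inside* the loop is already part of the exit-condition chain
// (ExitInstrs). Users outside the loop are skipped, because they do not make
// the value live in the VPlan.
static bool isExitPreComputeCandidate(
    Instruction *OpI, const Loop *OrigLoop,
    const SmallPtrSetImpl<Instruction *> &ExitInstrs) {
  return none_of(OpI->users(), [&](User *U) {
    auto *UI = cast<Instruction>(U);
    // Only in-loop users that are not part of the exit-condition chain
    // disqualify the operand.
    return OrigLoop->contains(UI->getParent()) && !ExitInstrs.contains(UI);
  });
}

In the actual change the same predicate appears inline as a lambda passed to any_of, with the disqualifying condition stated positively and the candidate skipped via continue.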

2 files changed: +119 -2 lines changed


llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 3 additions & 2 deletions
@@ -7332,8 +7332,9 @@ InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan,
     Cost += CostCtx.getLegacyCost(CondI, VF);
     for (Value *Op : CondI->operands()) {
       auto *OpI = dyn_cast<Instruction>(Op);
-      if (!OpI || any_of(OpI->users(), [&ExitInstrs](User *U) {
-            return !ExitInstrs.contains(cast<Instruction>(U));
+      if (!OpI || any_of(OpI->users(), [&ExitInstrs, this](User *U) {
+            return OrigLoop->contains(cast<Instruction>(U)->getParent()) &&
+                   !ExitInstrs.contains(cast<Instruction>(U));
           }))
         continue;
       ExitInstrs.insert(OpI);
Lines changed: 116 additions & 0 deletions
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-vectorize -mtriple=x86_64-apple-macosx -mcpu=penryn -S %s | FileCheck %s
+
+define i64 @test_value_in_exit_compare_chain_used_outside(ptr %src, i64 %x, i64 range(i64 1, 32) %N) {
+; CHECK-LABEL: define i64 @test_value_in_exit_compare_chain_used_outside(
+; CHECK-SAME: ptr [[SRC:%.*]], i64 [[X:%.*]], i64 range(i64 1, 32) [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[N]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = freeze i64 [[TMP0]]
+; CHECK-NEXT: [[UMIN2:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[X]])
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[UMIN2]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw i64 [[N]], -1
+; CHECK-NEXT: [[TMP4:%.*]] = freeze i64 [[TMP3]]
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[X]])
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[UMIN]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[UMIN]], 1
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 8, i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP9]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP10]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP11]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = and i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP21:%.*]] = and i64 [[TMP13]], 1
+; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP14]], 1
+; CHECK-NEXT: [[TMP23:%.*]] = and i64 [[TMP15]], 1
+; CHECK-NEXT: [[TMP24:%.*]] = and i64 [[TMP16]], 1
+; CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP17]], 1
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP27]], i32 -7
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP28]], align 1
+; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <8 x i8> [[WIDE_LOAD]], <8 x i8> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP29]] = xor <8 x i8> [[REVERSE]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP31:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP29]])
+; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ [[TMP31]], %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
+; CHECK: [[LOOP_HEADER]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR_RED:%.*]] = phi i8 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR_RED_NEXT:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV_AND:%.*]] = and i64 [[IV]], 1
+; CHECK-NEXT: [[X_INC:%.*]] = add i64 [[IV_AND]], [[X]]
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_AND]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[X_INC]], 0
+; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT_1:.*]], label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
+; CHECK-NEXT: [[XOR_RED_NEXT]] = xor i8 [[L]], [[XOR_RED]]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_2:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT_1]]:
+; CHECK-NEXT: [[X_INC_LCSSA:%.*]] = phi i64 [ [[X_INC]], %[[LOOP_HEADER]] ]
+; CHECK-NEXT: ret i64 [[X_INC_LCSSA]]
+; CHECK: [[EXIT_2]]:
+; CHECK-NEXT: [[XOR_RED_NEXT_LCSSA:%.*]] = phi i8 [ [[XOR_RED_NEXT]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[R:%.*]] = zext i8 [[XOR_RED_NEXT_LCSSA]] to i64
+; CHECK-NEXT: ret i64 [[R]]
+;
+entry:
+  br label %loop.header
+
+loop.header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+  %xor.red = phi i8 [ 0, %entry ], [ %xor.red.next, %loop.latch ]
+  %iv.and = and i64 %iv, 1
+  %x.inc = add i64 %iv.and, %x
+  %gep.src = getelementptr i8, ptr %src, i64 %iv.and
+  %cmp = icmp eq i64 %x.inc, 0
+  br i1 %cmp, label %exit.1, label %loop.latch
+
+loop.latch:
+  %l = load i8, ptr %gep.src, align 1
+  %xor.red.next = xor i8 %l, %xor.red
+  %iv.next = add i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %N
+  br i1 %ec, label %exit.2, label %loop.header
+
+exit.1:
+  ret i64 %x.inc
+
+exit.2:
+  %r = zext i8 %xor.red.next to i64
+  ret i64 %r
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
