Skip to content

Commit 3cf24db

Browse files
committed
[LV] Complete load groups and release store groups. Try 2.
This is a complete fix for CompleteLoadGroups introduced in D154309. We need to check for a dependency between A and every member of the load group of B. This patch also fixes another miscompile seen when we incorrectly sink stores below a dependent load (see testcase in interleaved-accesses-sink-store-across-load.ll). This is fixed by releasing store groups correctly. This change was previously reverted (e85fd3c) due to an Asan failure with a use-after-free error. A testcase is added and the bug is fixed in this version of the patch. Differential Revision: https://reviews.llvm.org/D155520
1 parent ea72a4e commit 3cf24db

File tree

5 files changed

+251
-88
lines changed

5 files changed

+251
-88
lines changed

llvm/lib/Analysis/VectorUtils.cpp

Lines changed: 58 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -1158,14 +1158,11 @@ void InterleavedAccessInfo::analyzeInterleaving(
11581158
LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
11591159
<< '\n');
11601160
GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
1161-
} else if (CompletedLoadGroups.contains(GroupB)) {
1162-
// Skip B if no new instructions can be added to its load group.
1163-
continue;
1161+
if (B->mayWriteToMemory())
1162+
StoreGroups.insert(GroupB);
1163+
else
1164+
LoadGroups.insert(GroupB);
11641165
}
1165-
if (B->mayWriteToMemory())
1166-
StoreGroups.insert(GroupB);
1167-
else
1168-
LoadGroups.insert(GroupB);
11691166
}
11701167

11711168
for (auto AI = std::next(BI); AI != E; ++AI) {
@@ -1191,38 +1188,62 @@ void InterleavedAccessInfo::analyzeInterleaving(
11911188
// Because accesses (2) and (3) are dependent, we can group (2) with (1)
11921189
// but not with (4). If we did, the dependent access (3) would be within
11931190
// the boundaries of the (2, 4) group.
1194-
if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
1195-
// If a dependence exists and A is already in a group, we know that A
1196-
// must be a store since A precedes B and WAR dependences are allowed.
1197-
// Thus, A would be sunk below B. We release A's group to prevent this
1198-
// illegal code motion. A will then be free to form another group with
1199-
// instructions that precede it.
1200-
if (isInterleaved(A)) {
1201-
InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);
1202-
1203-
LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
1204-
"dependence between " << *A << " and "<< *B << '\n');
1205-
1206-
StoreGroups.remove(StoreGroup);
1207-
releaseGroup(StoreGroup);
1208-
}
1209-
// If B is a load and part of an interleave group, no earlier loads can
1210-
// be added to B's interleave group, because this would mean the load B
1211-
// would need to be moved across store A. Mark the interleave group as
1212-
// complete.
1213-
if (GroupB && isa<LoadInst>(B)) {
1214-
LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
1215-
<< " as complete.\n");
1216-
1217-
CompletedLoadGroups.insert(GroupB);
1191+
auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
1192+
StrideEntry *A) -> Instruction * {
1193+
for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
1194+
Instruction *MemberOfGroupB = Group->getMember(Index);
1195+
if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
1196+
A, &*AccessStrideInfo.find(MemberOfGroupB)))
1197+
return MemberOfGroupB;
12181198
}
1199+
return nullptr;
1200+
};
12191201

1220-
// If a dependence exists and A is not already in a group (or it was
1221-
// and we just released it), B might be hoisted above A (if B is a
1222-
// load) or another store might be sunk below A (if B is a store). In
1223-
// either case, we can't add additional instructions to B's group. B
1224-
// will only form a group with instructions that it precedes.
1225-
break;
1202+
auto GroupA = getInterleaveGroup(A);
1203+
// If A is a load, dependencies are tolerable, there's nothing to do here.
1204+
// If both A and B belong to the same (store) group, they are independent,
1205+
// even if dependencies have not been recorded.
1206+
// If both GroupA and GroupB are null, there's nothing to do here.
1207+
if (A->mayWriteToMemory() && GroupA != GroupB) {
1208+
Instruction *DependentInst = nullptr;
1209+
// If GroupB is a load group, we have to compare AI against all
1210+
// members of GroupB because if any load within GroupB has a dependency
1211+
// on AI, we need to mark GroupB as complete and also release the
1212+
// store GroupA (if A belongs to one). The former prevents incorrect
1213+
// hoisting of load B above store A while the latter prevents incorrect
1214+
// sinking of store A below load B.
1215+
if (GroupB && LoadGroups.contains(GroupB))
1216+
DependentInst = DependentMember(GroupB, &*AI);
1217+
else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
1218+
DependentInst = B;
1219+
1220+
if (DependentInst) {
1221+
// A has a store dependence on B (or on some load within GroupB) and
1222+
// is part of a store group. Release A's group to prevent illegal
1223+
// sinking of A below B. A will then be free to form another group
1224+
// with instructions that precede it.
1225+
if (GroupA && StoreGroups.contains(GroupA)) {
1226+
LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
1227+
"dependence between "
1228+
<< *A << " and " << *DependentInst << '\n');
1229+
StoreGroups.remove(GroupA);
1230+
releaseGroup(GroupA);
1231+
}
1232+
// If B is a load and part of an interleave group, no earlier loads
1233+
// can be added to B's interleave group, because this would mean the
1234+
// DependentInst would move across store A. Mark the interleave group
1235+
// as complete.
1236+
if (GroupB && LoadGroups.contains(GroupB)) {
1237+
LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
1238+
<< " as complete.\n");
1239+
CompletedLoadGroups.insert(GroupB);
1240+
}
1241+
}
1242+
}
1243+
if (CompletedLoadGroups.contains(GroupB)) {
1244+
// Skip trying to add A to B, continue to look for other conflicting A's
1245+
// in groups to be released.
1246+
continue;
12261247
}
12271248

12281249
// At this point, we've checked for illegal code motion. If either A or B

llvm/test/Transforms/LoopVectorize/X86/interleaved-accesses-hoist-load-across-store.ll

Lines changed: 55 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,6 @@ exit:
121121
; compare against the obstructing stores (%l2 versus the store) there is no
122122
; dependency. However, the other load in %l2's interleave group (%l3) does
123123
; conflict with the store.
124-
; FIXME: The test case is currently mis-compiled.
125124
define void @pr63602_2(ptr %arr) {
126125
; CHECK-LABEL: define void @pr63602_2
127126
; CHECK-SAME: (ptr [[ARR:%.*]]) {
@@ -140,40 +139,64 @@ define void @pr63602_2(ptr %arr) {
140139
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[INDEX]], 3
141140
; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = add i64 1, [[TMP5]]
142141
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX2]], 0
143-
; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP6]], 4
144-
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP7]]
145-
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 -2
146-
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP9]], align 4
142+
; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX2]], 3
143+
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX2]], 6
144+
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX2]], 9
145+
; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[TMP6]], 4
146+
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP10]]
147+
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0
148+
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP12]], align 4
147149
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
148-
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
149-
; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
150-
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP1]]
151-
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP2]]
152-
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP3]]
153-
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP4]]
154-
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[STRIDED_VEC4]], i32 0
155-
; CHECK-NEXT: store i32 [[TMP14]], ptr [[TMP10]], align 4
156-
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[STRIDED_VEC4]], i32 1
157-
; CHECK-NEXT: store i32 [[TMP15]], ptr [[TMP11]], align 4
158-
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[STRIDED_VEC4]], i32 2
159-
; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP12]], align 4
160-
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STRIDED_VEC4]], i32 3
150+
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP1]]
151+
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP2]]
152+
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP3]]
153+
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP4]]
154+
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 0
161155
; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP13]], align 4
162-
; CHECK-NEXT: [[TMP18:%.*]] = add <4 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC]]
163-
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[TMP18]], i32 0
164-
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP10]], align 4
165-
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[TMP18]], i32 1
166-
; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP11]], align 4
167-
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[TMP18]], i32 2
168-
; CHECK-NEXT: store i32 [[TMP21]], ptr [[TMP12]], align 4
169-
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP18]], i32 3
170-
; CHECK-NEXT: store i32 [[TMP22]], ptr [[TMP13]], align 4
156+
; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 1
157+
; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP14]], align 4
158+
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 2
159+
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP15]], align 4
160+
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3
161+
; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP16]], align 4
162+
; CHECK-NEXT: [[TMP21:%.*]] = add nuw nsw i64 [[TMP6]], 2
163+
; CHECK-NEXT: [[TMP22:%.*]] = add nuw nsw i64 [[TMP7]], 2
164+
; CHECK-NEXT: [[TMP23:%.*]] = add nuw nsw i64 [[TMP8]], 2
165+
; CHECK-NEXT: [[TMP24:%.*]] = add nuw nsw i64 [[TMP9]], 2
166+
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP21]]
167+
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP22]]
168+
; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP23]]
169+
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP24]]
170+
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP13]], align 4
171+
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP14]], align 4
172+
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP15]], align 4
173+
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4
174+
; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP29]], i32 0
175+
; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 1
176+
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP31]], i32 2
177+
; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i32> [[TMP35]], i32 [[TMP32]], i32 3
178+
; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP25]], align 4
179+
; CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP26]], align 4
180+
; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP27]], align 4
181+
; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP28]], align 4
182+
; CHECK-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> poison, i32 [[TMP37]], i32 0
183+
; CHECK-NEXT: [[TMP42:%.*]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP38]], i32 1
184+
; CHECK-NEXT: [[TMP43:%.*]] = insertelement <4 x i32> [[TMP42]], i32 [[TMP39]], i32 2
185+
; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i32> [[TMP43]], i32 [[TMP40]], i32 3
186+
; CHECK-NEXT: [[TMP45:%.*]] = add <4 x i32> [[TMP36]], [[TMP44]]
187+
; CHECK-NEXT: [[TMP46:%.*]] = extractelement <4 x i32> [[TMP45]], i32 0
188+
; CHECK-NEXT: store i32 [[TMP46]], ptr [[TMP13]], align 4
189+
; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i32> [[TMP45]], i32 1
190+
; CHECK-NEXT: store i32 [[TMP47]], ptr [[TMP14]], align 4
191+
; CHECK-NEXT: [[TMP48:%.*]] = extractelement <4 x i32> [[TMP45]], i32 2
192+
; CHECK-NEXT: store i32 [[TMP48]], ptr [[TMP15]], align 4
193+
; CHECK-NEXT: [[TMP49:%.*]] = extractelement <4 x i32> [[TMP45]], i32 3
194+
; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP16]], align 4
171195
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
172-
; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
173-
; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
196+
; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
197+
; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
174198
; CHECK: middle.block:
175-
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 17, 16
176-
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
199+
; CHECK-NEXT: br label [[SCALAR_PH]]
177200
; CHECK: scalar.ph:
178201
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 49, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ]
179202
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 52, [[MIDDLE_BLOCK]] ], [ 4, [[ENTRY]] ]
@@ -195,7 +218,7 @@ define void @pr63602_2(ptr %arr) {
195218
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_IV_2]], align 4
196219
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 3
197220
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[IV_2]], 50
198-
; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
221+
; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
199222
; CHECK: exit:
200223
; CHECK-NEXT: ret void
201224
;

llvm/test/Transforms/LoopVectorize/X86/interleaved-accesses-sink-store-across-load.ll

Lines changed: 15 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,9 @@
33
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2"
44
target triple = "x86_64-apple-macos"
55

6-
; This is currently miscompiled.
7-
; %l2 load and the preceeding store has a dependency. However, we currently sink
6+
; The %l2 load and the preceding store have a dependency. We should not sink
87
; that store into the last store (by creating an interleaved store group). This
9-
; means the loaded %l2 has incorrect value.
10-
; We do not release this store group correctly because the next interleave group
11-
; chosen compares only the memory access of last load in program (%l3) against the dependent store location
12-
; (%gep.iv.1.plus.2) and they are different, thereby incorrectly assuming no
13-
; dependency. We need to compare against all loads in that interleaved group
14-
; (%l2 is part of it).
8+
; means the loaded %l2 will have an incorrect value.
159
define void @avoid_sinking_store_across_load(ptr %arr) {
1610
; CHECK-LABEL: define void @avoid_sinking_store_across_load
1711
; CHECK-SAME: (ptr [[ARR:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -28,26 +22,28 @@ define void @avoid_sinking_store_across_load(ptr %arr) {
2822
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
2923
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 4
3024
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP2]]
31-
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 -2
25+
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
3226
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP4]], align 4
3327
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
34-
; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
35-
; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
3628
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[ARR]], <4 x i64> [[VEC_IND2]]
3729
; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], <i64 2, i64 2, i64 2, i64 2>
3830
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[ARR]], <4 x i64> [[TMP6]]
39-
; CHECK-NEXT: [[TMP8:%.*]] = mul <4 x i32> [[STRIDED_VEC5]], <i32 25, i32 25, i32 25, i32 25>
31+
; CHECK-NEXT: [[TMP8:%.*]] = mul <4 x i32> [[STRIDED_VEC]], <i32 25, i32 25, i32 25, i32 25>
4032
; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP8]], <4 x ptr> [[TMP7]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
41-
; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC]]
42-
; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP9]], <4 x ptr> [[TMP5]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
33+
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x ptr> [[TMP7]], i32 0
34+
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[TMP9]], i32 0
35+
; CHECK-NEXT: [[WIDE_VEC4:%.*]] = load <12 x i32>, ptr [[TMP10]], align 4
36+
; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <12 x i32> [[WIDE_VEC4]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
37+
; CHECK-NEXT: [[STRIDED_VEC6:%.*]] = shufflevector <12 x i32> [[WIDE_VEC4]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
38+
; CHECK-NEXT: [[TMP11:%.*]] = add <4 x i32> [[STRIDED_VEC6]], [[STRIDED_VEC5]]
39+
; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP11]], <4 x ptr> [[TMP5]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
4340
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
4441
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 12, i64 12, i64 12, i64 12>
4542
; CHECK-NEXT: [[VEC_IND_NEXT3]] = add <4 x i64> [[VEC_IND2]], <i64 12, i64 12, i64 12, i64 12>
46-
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
47-
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
43+
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
44+
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
4845
; CHECK: middle.block:
49-
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 17, 16
50-
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
46+
; CHECK-NEXT: br label [[SCALAR_PH]]
5147
; CHECK: scalar.ph:
5248
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 49, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ]
5349
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 52, [[MIDDLE_BLOCK]] ], [ 4, [[ENTRY]] ]
@@ -70,7 +66,7 @@ define void @avoid_sinking_store_across_load(ptr %arr) {
7066
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_IV_2]], align 4
7167
; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 3
7268
; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[IV_2]], 50
73-
; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
69+
; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
7470
; CHECK: exit:
7571
; CHECK-NEXT: ret void
7672
;
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true -max-dependences=0 -S %s | FileCheck %s
2+
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
3+
4+
; None of these stores have dependences between them, so we can successfully
5+
; interleave them even though the max-dependences threshold is 0.
6+
define void @three_interleaved_stores(ptr %arr) {
7+
; CHECK-LABEL: define void @three_interleaved_stores
8+
; CHECK: store <12 x i8>
9+
entry:
10+
br label %loop
11+
12+
loop:
13+
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
14+
%i.plus.1 = add nuw nsw i64 %i, 1
15+
%i.plus.2 = add nuw nsw i64 %i, 2
16+
%gep.i.plus.0 = getelementptr inbounds i8, ptr %arr, i64 %i
17+
%gep.i.plus.1 = getelementptr inbounds i8, ptr %arr, i64 %i.plus.1
18+
%gep.i.plus.2 = getelementptr inbounds i8, ptr %arr, i64 %i.plus.2
19+
store i8 1, ptr %gep.i.plus.0
20+
store i8 1, ptr %gep.i.plus.1
21+
store i8 1, ptr %gep.i.plus.2
22+
%i.next = add nuw nsw i64 %i, 3
23+
%icmp = icmp ugt i64 %i, 1032
24+
br i1 %icmp, label %exit, label %loop
25+
26+
exit:
27+
ret void
28+
}

0 commit comments

Comments
 (0)