Skip to content

Commit 2e4c5d1

Browse files
committed
CoroSplit: Fix coroutine splitting for retcon and retcon.once
Summary: For retcon and retcon.once coroutines we assume that all uses of spills can be sunk past coro.begin. This simplifies the handling of instructions that escape the address of an alloca. The current implementation would have issues if the address of the alloca escaped before coro.begin. (It also has issues with casts before coro.begin whose uses occur after the coro.begin instruction.) For example:

  %alloca_addr = alloca ...
  %escape = ptrtoint %alloca_addr
  coro.begin
  store %escape to %alloca_addr

rdar://60272809 Subscribers: hiraditya, modocache, mgrang, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D81023
1 parent c2e27ac commit 2e4c5d1

File tree

2 files changed

+131
-0
lines changed

2 files changed

+131
-0
lines changed

llvm/lib/Transforms/Coroutines/CoroFrame.cpp

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -899,6 +899,23 @@ static Instruction *insertSpills(const SpillInfo &Spills, coro::Shape &Shape) {
899899
FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
900900
SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
901901
Shape.AllocaSpillBlock = SpillBlock;
902+
903+
// retcon and retcon.once lowering assumes all uses have been sunk.
904+
if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce) {
905+
// If we found any allocas, replace all of their remaining uses with Geps.
906+
Builder.SetInsertPoint(&SpillBlock->front());
907+
for (auto &P : Allocas) {
908+
auto *G = GetFramePointer(P.second, P.first);
909+
910+
// We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
911+
// here, as we are changing location of the instruction.
912+
G->takeName(P.first);
913+
P.first->replaceAllUsesWith(G);
914+
P.first->eraseFromParent();
915+
}
916+
return FramePtr;
917+
}
918+
902919
// If we found any alloca, replace all of their remaining uses with GEP
903920
// instructions. Because new dbg.declare have been created for these alloca,
904921
// we also delete the original dbg.declare and replace other uses with undef.
@@ -1482,6 +1499,55 @@ static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
14821499
}
14831500
}
14841501

1502+
/// retcon and retcon.once conventions assume that all spill uses can be sunk
1503+
/// after the coro.begin intrinsic.
1504+
static void sinkSpillUsesAfterCoroBegin(Function &F, const SpillInfo &Spills,
1505+
CoroBeginInst *CoroBegin) {
1506+
DominatorTree Dom(F);
1507+
1508+
SmallSetVector<Instruction *, 32> ToMove;
1509+
SmallVector<Instruction *, 32> Worklist;
1510+
1511+
// Collect all users that precede coro.begin.
1512+
for (auto const &Entry : Spills) {
1513+
auto *SpillDef = Entry.def();
1514+
for (User *U : SpillDef->users()) {
1515+
auto Inst = cast<Instruction>(U);
1516+
if (Inst->getParent() != CoroBegin->getParent() ||
1517+
Dom.dominates(CoroBegin, Inst))
1518+
continue;
1519+
if (ToMove.insert(Inst))
1520+
Worklist.push_back(Inst);
1521+
}
1522+
}
1523+
// Recursively collect users before coro.begin.
1524+
while (!Worklist.empty()) {
1525+
auto *Def = Worklist.back();
1526+
Worklist.pop_back();
1527+
for (User *U : Def->users()) {
1528+
auto Inst = cast<Instruction>(U);
1529+
if (Dom.dominates(CoroBegin, Inst))
1530+
continue;
1531+
if (ToMove.insert(Inst))
1532+
Worklist.push_back(Inst);
1533+
}
1534+
}
1535+
1536+
// Sort by dominance.
1537+
SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
1538+
std::sort(InsertionList.begin(), InsertionList.end(),
1539+
[&Dom](Instruction *A, Instruction *B) -> bool {
1540+
// If a dominates b it should preceed (<) b.
1541+
return Dom.dominates(A, B);
1542+
});
1543+
1544+
Instruction *InsertPt = CoroBegin->getNextNode();
1545+
for (Instruction *Inst : InsertionList)
1546+
Inst->moveBefore(InsertPt);
1547+
1548+
return;
1549+
}
1550+
14851551
void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
14861552
eliminateSwiftError(F, Shape);
14871553

@@ -1618,6 +1684,8 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
16181684
}
16191685
}
16201686
LLVM_DEBUG(dump("Spills", Spills));
1687+
if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce)
1688+
sinkSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
16211689
Shape.FrameTy = buildFrameType(F, Shape, Spills);
16221690
Shape.FramePtr = insertSpills(Spills, Shape);
16231691
lowerLocalAllocas(LocalAllocas, DeadInstructions);
Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
; RUN: opt < %s -coro-split -S | FileCheck %s

target datalayout = "p:64:64:64"

declare void @prototype_f(i8*, i1)

declare noalias i8* @allocate(i32 %size)
declare void @deallocate(i8* %ptr)
declare void @init(i64* %ptr)
declare void @use(i8* %ptr)
declare void @use_addr_val(i64 %val, {i64, i64}* %addr)

; A retcon.once coroutine whose frame alloca escapes (via ptrtoint and a
; bitcast) before coro.begin; all remaining uses must be rewritten to frame
; GEPs sunk after coro.begin without losing the stores to the frame.
define { i8*, {i64, i64}* } @f(i8* %buffer) "coroutine.presplit"="1" {
entry:
  %tmp = alloca { i64, i64 }, align 8
  %proj.1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 0
  %proj.2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 1
  store i64 0, i64* %proj.1, align 8
  store i64 0, i64* %proj.2, align 8
  %cast = bitcast { i64, i64 }* %tmp to i8*
  %escape_addr = ptrtoint {i64, i64}* %tmp to i64
  %id = call token @llvm.coro.id.retcon.once(i32 32, i32 8, i8* %buffer, i8* bitcast (void (i8*, i1)* @prototype_f to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
  %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
  %proj.2.2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 1
  call void @init(i64 * %proj.1)
  call void @init(i64 * %proj.2.2)
  call void @use_addr_val(i64 %escape_addr, {i64, i64}* %tmp)
  %abort = call i1 (...) @llvm.coro.suspend.retcon.i1({i64, i64}* %tmp)
  br i1 %abort, label %end, label %resume

resume:
  call void @use(i8* %cast)
  br label %end

end:
  call i1 @llvm.coro.end(i8* %hdl, i1 0)
  unreachable
}
; Make sure we don't lose writes to the frame.
; CHECK-LABEL: define { i8*, { i64, i64 }* } @f(i8* %buffer) {
; CHECK: [[FRAMEPTR:%.*]] = bitcast i8* %buffer to %f.Frame*
; CHECK: [[TMP:%.*]] = getelementptr inbounds %f.Frame, %f.Frame* [[FRAMEPTR]], i32 0, i32 0
; CHECK: [[PROJ1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP]], i64 0, i32 0
; CHECK: [[PROJ2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP]], i64 0, i32 1
; CHECK: store i64 0, i64* [[PROJ1]]
; CHECK: store i64 0, i64* [[PROJ2]]
; CHECK: [[ESCAPED_ADDR:%.*]] = ptrtoint { i64, i64 }* [[TMP]] to i64
; CHECK: call void @init(i64* [[PROJ1]])
; CHECK: call void @init(i64* [[PROJ2]])
; CHECK: call void @use_addr_val(i64 [[ESCAPED_ADDR]], { i64, i64 }* [[TMP]])

; CHECK-LABEL: define internal void @f.resume.0(i8* {{.*}} %0, i1 %1) {
; CHECK: [[FRAMEPTR:%.*]] = bitcast i8* %0 to %f.Frame*
; CHECK: resume:
; CHECK: [[TMP:%.*]] = getelementptr inbounds %f.Frame, %f.Frame* [[FRAMEPTR]], i32 0, i32 0
; CHECK: [[CAST:%.*]] = bitcast { i64, i64 }* [[TMP]] to i8*
; CHECK: call void @use(i8* [[CAST]])

declare token @llvm.coro.id.retcon.once(i32, i32, i8*, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)
declare i1 @llvm.coro.suspend.retcon.i1(...)
declare i1 @llvm.coro.end(i8*, i1)
63+

0 commit comments

Comments
 (0)