Skip to content

Commit cedaf31

Browse files
committed
Fix PMO to not scalarize empty tuple
Currently PMO tries to scalarize an empty tuple, and ends up deleting the store of the empty tuple. This causes a cascade of problems. Mem2Reg can end up seeing loads without stores of the empty tuple type, and creates an undef while optimizing the alloca away. This is bad; we don't want to create undefs unnecessarily in SIL. Some optimizations can query the undef's parent function or block, leading to a nullptr and a compiler crash. Fixes rdar://94829482
1 parent 4dffe27 commit cedaf31

File tree

2 files changed

+27
-9
lines changed

2 files changed

+27
-9
lines changed

lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -278,19 +278,23 @@ bool ElementUseCollector::collectUses(SILValue Pointer) {
278278

279279
// Loads are a use of the value.
280280
if (isa<LoadInst>(User) || isa<LoadBorrowInst>(User)) {
281-
if (PointeeType.is<TupleType>())
282-
UsesToScalarize.push_back(User);
283-
else
281+
if (auto tupleType = PointeeType.getAs<TupleType>()) {
282+
if (!tupleType->isEqual(Module.getASTContext().TheEmptyTupleType)) {
283+
UsesToScalarize.push_back(User);
284+
}
285+
} else
284286
Uses.emplace_back(User, PMOUseKind::Load);
285287
continue;
286288
}
287289

288290
// Stores *to* the allocation are writes.
289291
if (auto *si = dyn_cast<StoreInst>(User)) {
290292
if (UI->getOperandNumber() == StoreInst::Dest) {
291-
if (PointeeType.is<TupleType>()) {
292-
UsesToScalarize.push_back(User);
293-
continue;
293+
if (auto tupleType = PointeeType.getAs<TupleType>()) {
294+
if (!tupleType->isEqual(Module.getASTContext().TheEmptyTupleType)) {
295+
UsesToScalarize.push_back(User);
296+
continue;
297+
}
294298
}
295299

296300
auto kind = ([&]() -> PMOUseKind {
@@ -322,9 +326,11 @@ bool ElementUseCollector::collectUses(SILValue Pointer) {
322326
if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
323327
// If this is a copy of a tuple, we should scalarize it so that we don't
324328
// have an access that crosses elements.
325-
if (PointeeType.is<TupleType>()) {
326-
UsesToScalarize.push_back(CAI);
327-
continue;
329+
if (auto tupleType = PointeeType.getAs<TupleType>()) {
330+
if (!tupleType->isEqual(Module.getASTContext().TheEmptyTupleType)) {
331+
UsesToScalarize.push_back(CAI);
332+
continue;
333+
}
328334
}
329335

330336
// If this is the source of the copy_addr, then this is a load. If it is

test/SILOptimizer/predictable_memopt.sil

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,18 @@ bb0(%0 : $Int):
4545
return %d : $Int
4646
}
4747

48+
// CHECK-LABEL: sil @emptytuple_promotion :
49+
// CHECK: store
50+
// CHECK-LABEL: } // end sil function 'emptytuple_promotion'
51+
sil @emptytuple_promotion : $@convention(thin) () -> () {
52+
bb0:
53+
%1 = alloc_stack $()
54+
%2 = tuple ()
55+
store %2 to %1 : $*()
56+
dealloc_stack %1 : $*()
57+
return %2 : $()
58+
}
59+
4860
// In this example we create two boxes. The first box is initialized and then
4961
// taken from to initialize the second box. This means that the first box must
5062
// be dealloc_boxed (since its underlying memory is considered invalid). In

0 commit comments

Comments
 (0)