Skip to content

Commit f223dcc

Browse files
committed
[pred-memopts] Rather than asserting on recursive initialization, just return false and bail.
Until the beginning of the ownership transition, DI and predictable mem opts used the same memory use collector. I split them partially since I need to turn on ownership for predictable mem opts at some point, but also because there was a huge amount of special code that would only trigger if it was used by DI or used by predictable mem opts. After I did the copy, some of the asserts that were needed for DI remained in the predictable mem opts code. When pred-memopts was only run in the mandatory pipeline, keeping these assertions was OK, but pred-memopts was recently added to the perf pipeline, meaning that it may see code that breaks these DI invariants (and thus hit this assertion). We should remove this limitation on predictable-memopts, but that would require some scheduled time to read the code (more time than I have to fix this bug =p). So instead I changed the code to just bail in these cases. rdar://40032102 (cherry picked from commit 269f8e8)
1 parent 96993e6 commit f223dcc

File tree

4 files changed

+151
-59
lines changed

4 files changed

+151
-59
lines changed

lib/SILOptimizer/Mandatory/DIMemoryUseCollector.cpp

Lines changed: 91 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -247,36 +247,46 @@ namespace {
247247

248248
/// This is the main entry point for the use walker. It collects uses from
249249
/// the address and the refcount result of the allocation.
250-
void collectFrom() {
251-
if (auto *ABI = TheMemory.getContainer())
252-
collectContainerUses(ABI);
253-
else
254-
collectUses(TheMemory.getAddress(), 0);
255-
256-
// Collect information about the retain count result as well.
257-
for (auto UI : TheMemory.MemoryInst->getUses()) {
258-
auto *User = UI->getUser();
259-
260-
// If this is a release or dealloc_stack, then remember it as such.
261-
if (isa<StrongReleaseInst>(User) || isa<DeallocStackInst>(User) ||
262-
isa<DeallocBoxInst>(User)) {
263-
Releases.push_back(User);
264-
}
265-
}
266-
}
250+
LLVM_NODISCARD bool collectFrom();
267251

268252
private:
269-
void collectUses(SILValue Pointer, unsigned BaseEltNo);
270-
void collectContainerUses(AllocBoxInst *ABI);
253+
LLVM_NODISCARD bool collectUses(SILValue Pointer, unsigned BaseEltNo);
254+
LLVM_NODISCARD bool collectContainerUses(AllocBoxInst *ABI);
271255
void addElementUses(unsigned BaseEltNo, SILType UseTy,
272256
SILInstruction *User, DIUseKind Kind);
273-
void collectTupleElementUses(TupleElementAddrInst *TEAI,
274-
unsigned BaseEltNo);
275-
void collectStructElementUses(StructElementAddrInst *SEAI,
276-
unsigned BaseEltNo);
257+
LLVM_NODISCARD bool collectTupleElementUses(TupleElementAddrInst *TEAI,
258+
unsigned BaseEltNo);
259+
LLVM_NODISCARD bool collectStructElementUses(StructElementAddrInst *SEAI,
260+
unsigned BaseEltNo);
277261
};
278262
} // end anonymous namespace
279263

264+
bool ElementUseCollector::collectFrom() {
265+
bool shouldOptimize = false;
266+
267+
if (auto *ABI = TheMemory.getContainer()) {
268+
shouldOptimize = collectContainerUses(ABI);
269+
} else {
270+
shouldOptimize = collectUses(TheMemory.getAddress(), 0);
271+
}
272+
273+
if (!shouldOptimize)
274+
return false;
275+
276+
// Collect information about the retain count result as well.
277+
for (auto UI : TheMemory.MemoryInst->getUses()) {
278+
auto *User = UI->getUser();
279+
280+
// If this is a release or dealloc_stack, then remember it as such.
281+
if (isa<StrongReleaseInst>(User) || isa<DeallocStackInst>(User) ||
282+
isa<DeallocBoxInst>(User)) {
283+
Releases.push_back(User);
284+
}
285+
}
286+
287+
return true;
288+
}
289+
280290
/// addElementUses - An operation (e.g. load, store, inout use, etc) on a value
281291
/// acts on all of the aggregate elements in that value. For example, a load
282292
/// of $*(Int,Int) is a use of both Int elements of the tuple. This is a helper
@@ -295,8 +305,8 @@ void ElementUseCollector::addElementUses(unsigned BaseEltNo, SILType UseTy,
295305
/// Given a tuple_element_addr or struct_element_addr, compute the new
296306
/// BaseEltNo implicit in the selected member, and recursively add uses of
297307
/// the instruction.
298-
void ElementUseCollector::
299-
collectTupleElementUses(TupleElementAddrInst *TEAI, unsigned BaseEltNo) {
308+
bool ElementUseCollector::collectTupleElementUses(TupleElementAddrInst *TEAI,
309+
unsigned BaseEltNo) {
300310

301311
// If we're walking into a tuple within a struct or enum, don't adjust the
302312
// BaseElt. The uses hanging off the tuple_element_addr are going to be
@@ -314,20 +324,20 @@ collectTupleElementUses(TupleElementAddrInst *TEAI, unsigned BaseEltNo) {
314324
BaseEltNo += getElementCountRec(Module, EltTy);
315325
}
316326
}
317-
318-
collectUses(TEAI, BaseEltNo);
327+
328+
return collectUses(TEAI, BaseEltNo);
319329
}
320330

321-
void ElementUseCollector::collectStructElementUses(StructElementAddrInst *SEAI,
331+
bool ElementUseCollector::collectStructElementUses(StructElementAddrInst *SEAI,
322332
unsigned BaseEltNo) {
323333
// Generally, we set the "InStructSubElement" flag and recursively process
324334
// the uses so that we know that we're looking at something within the
325335
// current element.
326336
llvm::SaveAndRestore<bool> X(InStructSubElement, true);
327-
collectUses(SEAI, BaseEltNo);
337+
return collectUses(SEAI, BaseEltNo);
328338
}
329339

330-
void ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
340+
bool ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
331341
for (Operand *UI : ABI->getUses()) {
332342
auto *User = UI->getUser();
333343

@@ -340,16 +350,21 @@ void ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
340350
continue;
341351

342352
if (auto project = dyn_cast<ProjectBoxInst>(User)) {
343-
collectUses(project, project->getFieldIndex());
353+
if (!collectUses(project, project->getFieldIndex()))
354+
return false;
344355
continue;
345356
}
346357

347358
// Other uses of the container are considered escapes of the values.
348-
for (unsigned field : indices(ABI->getBoxType()->getLayout()->getFields()))
359+
for (unsigned field :
360+
indices(ABI->getBoxType()->getLayout()->getFields())) {
349361
addElementUses(field,
350362
ABI->getBoxType()->getFieldType(ABI->getModule(), field),
351363
User, DIUseKind::Escape);
364+
}
352365
}
366+
367+
return true;
353368
}
354369

355370
// Returns true when the instruction represents added instrumentation for
@@ -367,7 +382,7 @@ static bool isSanitizerInstrumentation(SILInstruction *Instruction,
367382
return false;
368383
}
369384

370-
void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
385+
bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
371386
assert(Pointer->getType().isAddress() &&
372387
"Walked through the pointer to the value?");
373388
SILType PointeeType = Pointer->getType().getObjectType();
@@ -383,19 +398,22 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
383398

384399
// struct_element_addr P, #field indexes into the current element.
385400
if (auto *SEAI = dyn_cast<StructElementAddrInst>(User)) {
386-
collectStructElementUses(SEAI, BaseEltNo);
401+
if (!collectStructElementUses(SEAI, BaseEltNo))
402+
return false;
387403
continue;
388404
}
389405

390406
// Instructions that compute a subelement are handled by a helper.
391407
if (auto *TEAI = dyn_cast<TupleElementAddrInst>(User)) {
392-
collectTupleElementUses(TEAI, BaseEltNo);
408+
if (!collectTupleElementUses(TEAI, BaseEltNo))
409+
return false;
393410
continue;
394411
}
395412

396413
// Look through begin_access.
397414
if (auto I = dyn_cast<BeginAccessInst>(User)) {
398-
collectUses(I, BaseEltNo);
415+
if (!collectUses(I, BaseEltNo))
416+
return false;
399417
continue;
400418
}
401419

@@ -439,7 +457,7 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
439457
continue;
440458
}
441459

442-
if (auto *SWI = dyn_cast<StoreWeakInst>(User))
460+
if (auto *SWI = dyn_cast<StoreWeakInst>(User)) {
443461
if (UI->getOperandNumber() == 1) {
444462
DIUseKind Kind;
445463
if (InStructSubElement)
@@ -451,8 +469,9 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
451469
Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
452470
continue;
453471
}
472+
}
454473

455-
if (auto *SUI = dyn_cast<StoreUnownedInst>(User))
474+
if (auto *SUI = dyn_cast<StoreUnownedInst>(User)) {
456475
if (UI->getOperandNumber() == 1) {
457476
DIUseKind Kind;
458477
if (InStructSubElement)
@@ -464,6 +483,7 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
464483
Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
465484
continue;
466485
}
486+
}
467487

468488
if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
469489
// If this is a copy of a tuple, we should scalarize it so that we don't
@@ -505,7 +525,13 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
505525
// If this is an out-parameter, it is like a store.
506526
unsigned NumIndirectResults = substConv.getNumIndirectSILResults();
507527
if (ArgumentNumber < NumIndirectResults) {
508-
assert(!InStructSubElement && "We're initializing sub-members?");
528+
// We do not support initializing sub members. This is an old
529+
// restriction from when this code was used by Definite
530+
// Initialization. With proper code review, we can remove this, but for
531+
// now, lets be conservative.
532+
if (InStructSubElement) {
533+
return false;
534+
}
509535
addElementUses(BaseEltNo, PointeeType, User,
510536
DIUseKind::Initialization);
511537
continue;
@@ -550,29 +576,38 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
550576
// be explicitly initialized by a copy_addr or some other use of the
551577
// projected address).
552578
if (auto I = dyn_cast<InitEnumDataAddrInst>(User)) {
553-
assert(!InStructSubElement &&
554-
"init_enum_data_addr shouldn't apply to struct subelements");
579+
// If we are in a struct already, bail. With proper analysis, we should be
580+
// able to do this optimization.
581+
if (InStructSubElement) {
582+
return false;
583+
}
584+
555585
// Keep track of the fact that we're inside of an enum. This informs our
556586
// recursion that tuple stores are not scalarized outside, and that stores
557587
// should not be treated as partial stores.
558588
llvm::SaveAndRestore<bool> X(InEnumSubElement, true);
559-
collectUses(I, BaseEltNo);
589+
if (!collectUses(I, BaseEltNo))
590+
return false;
560591
continue;
561592
}
562593

563594
// init_existential_addr is modeled as an initialization store.
564595
if (isa<InitExistentialAddrInst>(User)) {
565-
assert(!InStructSubElement &&
566-
"init_existential_addr should not apply to struct subelements");
596+
// init_existential_addr should not apply to struct subelements.
597+
if (InStructSubElement) {
598+
return false;
599+
}
567600
Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
568601
BaseEltNo, 1));
569602
continue;
570603
}
571604

572605
// inject_enum_addr is modeled as an initialization store.
573606
if (isa<InjectEnumAddrInst>(User)) {
574-
assert(!InStructSubElement &&
575-
"inject_enum_addr the subelement of a struct unless in a ctor");
607+
// inject_enum_addr the subelement of a struct unless in a ctor.
608+
if (InStructSubElement) {
609+
return false;
610+
}
576611
Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
577612
BaseEltNo, 1));
578613
continue;
@@ -667,16 +702,22 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
667702
// Now that we've scalarized some stuff, recurse down into the newly created
668703
// element address computations to recursively process it. This can cause
669704
// further scalarization.
670-
for (auto EltPtr : ElementAddrs)
671-
collectTupleElementUses(cast<TupleElementAddrInst>(EltPtr), BaseEltNo);
705+
if (llvm::any_of(ElementAddrs, [&](SILValue V) {
706+
return !collectTupleElementUses(cast<TupleElementAddrInst>(V),
707+
BaseEltNo);
708+
})) {
709+
return false;
710+
}
672711
}
712+
713+
return true;
673714
}
674715

675716
/// collectDIElementUsesFrom - Analyze all uses of the specified allocation
676717
/// instruction (alloc_box, alloc_stack or mark_uninitialized), classifying them
677718
/// and storing the information found into the Uses and Releases lists.
678-
void swift::collectDIElementUsesFrom(
719+
bool swift::collectDIElementUsesFrom(
679720
const DIMemoryObjectInfo &MemoryInfo, SmallVectorImpl<DIMemoryUse> &Uses,
680721
SmallVectorImpl<SILInstruction *> &Releases) {
681-
ElementUseCollector(MemoryInfo, Uses, Releases).collectFrom();
722+
return ElementUseCollector(MemoryInfo, Uses, Releases).collectFrom();
682723
}

lib/SILOptimizer/Mandatory/DIMemoryUseCollector.h

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,12 +21,14 @@
2121
#define SWIFT_SILOPTIMIZER_MANDATORY_DIMEMORYUSECOLLECTOR_H
2222

2323
#include "swift/Basic/LLVM.h"
24-
#include "llvm/ADT/APInt.h"
2524
#include "swift/SIL/SILInstruction.h"
2625
#include "swift/SIL/SILType.h"
26+
#include "llvm/ADT/APInt.h"
27+
#include "llvm/Support/Compiler.h"
2728

2829
namespace swift {
29-
class SILBuilder;
30+
31+
class SILBuilder;
3032

3133
/// DIMemoryObjectInfo - This struct holds information about the memory object
3234
/// being analyzed that is required to correctly break it down into elements.
@@ -187,9 +189,10 @@ struct DIMemoryUse {
187189
/// collectDIElementUsesFrom - Analyze all uses of the specified allocation
188190
/// instruction (alloc_box, alloc_stack or mark_uninitialized), classifying them
189191
/// and storing the information found into the Uses and Releases lists.
190-
void collectDIElementUsesFrom(const DIMemoryObjectInfo &MemoryInfo,
191-
SmallVectorImpl<DIMemoryUse> &Uses,
192-
SmallVectorImpl<SILInstruction *> &Releases);
192+
LLVM_NODISCARD bool
193+
collectDIElementUsesFrom(const DIMemoryObjectInfo &MemoryInfo,
194+
SmallVectorImpl<DIMemoryUse> &Uses,
195+
SmallVectorImpl<SILInstruction *> &Releases);
193196

194197
} // end namespace swift
195198

lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1372,9 +1372,14 @@ static bool optimizeMemoryAllocations(SILFunction &Fn) {
13721372
// Set up the datastructure used to collect the uses of the allocation.
13731373
SmallVector<DIMemoryUse, 16> Uses;
13741374
SmallVector<SILInstruction*, 4> Releases;
1375-
1376-
// Walk the use list of the pointer, collecting them.
1377-
collectDIElementUsesFrom(MemInfo, Uses, Releases);
1375+
1376+
// Walk the use list of the pointer, collecting them. If we are not able
1377+
// to optimize, skip this value. *NOTE* We may still scalarize values
1378+
// inside the value.
1379+
if (!collectDIElementUsesFrom(MemInfo, Uses, Releases)) {
1380+
++I;
1381+
continue;
1382+
}
13781383

13791384
Changed |= AllocOptimize(Alloc, Uses, Releases).doIt();
13801385

test/SILOptimizer/predictable_memopt.sil

Lines changed: 44 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
import Builtin
44
import Swift
55

6-
76
// CHECK-LABEL: sil @simple_reg_promotion
87
// CHECK: bb0(%0 : $Int):
98
// CHECK-NEXT: return %0 : $Int
@@ -838,3 +837,47 @@ bb2:
838837
destroy_addr %1 : $*Builtin.NativeObject
839838
unreachable
840839
}
840+
841+
842+
class K {
843+
init()
844+
}
845+
846+
sil @init_k : $@convention(thin) () -> @out K
847+
848+
struct S {
849+
var k: K
850+
}
851+
852+
// CHECK-LABEL: sil @recursive_struct_destroy_with_apply : $@convention(thin) () -> S {
853+
// CHECK: alloc_stack
854+
// CHECK: } // end sil function 'recursive_struct_destroy_with_apply'
855+
sil @recursive_struct_destroy_with_apply : $@convention(thin) () -> S {
856+
bb0:
857+
%0 = alloc_stack $S
858+
%1 = struct_element_addr %0 : $*S, #S.k
859+
%2 = function_ref @init_k : $@convention(thin) () -> @out K
860+
%3 = apply %2(%1) : $@convention(thin) () -> @out K
861+
%4 = load %0 : $*S
862+
dealloc_stack %0 : $*S
863+
return %4 : $S
864+
}
865+
866+
struct SWithOpt {
867+
var k: Optional<K>
868+
}
869+
870+
// CHECK-LABEL: sil @recursive_struct_destroy_with_enum_init : $@convention(thin) (@owned K) -> @owned SWithOpt {
871+
// CHECK: alloc_stack
872+
// CHECK: } // end sil function 'recursive_struct_destroy_with_enum_init'
873+
sil @recursive_struct_destroy_with_enum_init : $@convention(thin) (@owned K) -> @owned SWithOpt {
874+
bb0(%arg : $K):
875+
%0 = alloc_stack $SWithOpt
876+
%1 = struct_element_addr %0 : $*SWithOpt, #SWithOpt.k
877+
%2 = init_enum_data_addr %1 : $*Optional<K>, #Optional.some!enumelt.1
878+
store %arg to %2 : $*K
879+
inject_enum_addr %1 : $*Optional<K>, #Optional.some!enumelt.1
880+
%4 = load %0 : $*SWithOpt
881+
dealloc_stack %0 : $*SWithOpt
882+
return %4 : $SWithOpt
883+
}

0 commit comments

Comments
 (0)