[4.2-04-30-2018][pred-memopts] Rather than asserting on recursive initialization, jus… #16545

Merged
141 changes: 91 additions & 50 deletions lib/SILOptimizer/Mandatory/DIMemoryUseCollector.cpp
@@ -247,36 +247,46 @@ namespace {

/// This is the main entry point for the use walker. It collects uses from
/// the address and the refcount result of the allocation.
void collectFrom() {
if (auto *ABI = TheMemory.getContainer())
collectContainerUses(ABI);
else
collectUses(TheMemory.getAddress(), 0);

// Collect information about the retain count result as well.
for (auto UI : TheMemory.MemoryInst->getUses()) {
auto *User = UI->getUser();

// If this is a release or dealloc_stack, then remember it as such.
if (isa<StrongReleaseInst>(User) || isa<DeallocStackInst>(User) ||
isa<DeallocBoxInst>(User)) {
Releases.push_back(User);
}
}
}
LLVM_NODISCARD bool collectFrom();

private:
void collectUses(SILValue Pointer, unsigned BaseEltNo);
void collectContainerUses(AllocBoxInst *ABI);
LLVM_NODISCARD bool collectUses(SILValue Pointer, unsigned BaseEltNo);
LLVM_NODISCARD bool collectContainerUses(AllocBoxInst *ABI);
void addElementUses(unsigned BaseEltNo, SILType UseTy,
SILInstruction *User, DIUseKind Kind);
void collectTupleElementUses(TupleElementAddrInst *TEAI,
unsigned BaseEltNo);
void collectStructElementUses(StructElementAddrInst *SEAI,
unsigned BaseEltNo);
LLVM_NODISCARD bool collectTupleElementUses(TupleElementAddrInst *TEAI,
unsigned BaseEltNo);
LLVM_NODISCARD bool collectStructElementUses(StructElementAddrInst *SEAI,
unsigned BaseEltNo);
};
} // end anonymous namespace

bool ElementUseCollector::collectFrom() {
bool shouldOptimize = false;

if (auto *ABI = TheMemory.getContainer()) {
shouldOptimize = collectContainerUses(ABI);
} else {
shouldOptimize = collectUses(TheMemory.getAddress(), 0);
}

if (!shouldOptimize)
return false;

// Collect information about the retain count result as well.
for (auto UI : TheMemory.MemoryInst->getUses()) {
auto *User = UI->getUser();

// If this is a release or dealloc_stack, then remember it as such.
if (isa<StrongReleaseInst>(User) || isa<DeallocStackInst>(User) ||
isa<DeallocBoxInst>(User)) {
Releases.push_back(User);
}
}

return true;
}

/// addElementUses - An operation (e.g. load, store, inout use, etc) on a value
/// acts on all of the aggregate elements in that value. For example, a load
/// of $*(Int,Int) is a use of both Int elements of the tuple. This is a helper
@@ -295,8 +305,8 @@ void ElementUseCollector::addElementUses(unsigned BaseEltNo, SILType UseTy,
/// Given a tuple_element_addr or struct_element_addr, compute the new
/// BaseEltNo implicit in the selected member, and recursively add uses of
/// the instruction.
void ElementUseCollector::
collectTupleElementUses(TupleElementAddrInst *TEAI, unsigned BaseEltNo) {
bool ElementUseCollector::collectTupleElementUses(TupleElementAddrInst *TEAI,
unsigned BaseEltNo) {

// If we're walking into a tuple within a struct or enum, don't adjust the
// BaseElt. The uses hanging off the tuple_element_addr are going to be
@@ -314,20 +324,20 @@ collectTupleElementUses(TupleElementAddrInst *TEAI, unsigned BaseEltNo) {
BaseEltNo += getElementCountRec(Module, EltTy);
}
}
collectUses(TEAI, BaseEltNo);

return collectUses(TEAI, BaseEltNo);
}

void ElementUseCollector::collectStructElementUses(StructElementAddrInst *SEAI,
bool ElementUseCollector::collectStructElementUses(StructElementAddrInst *SEAI,
unsigned BaseEltNo) {
// Generally, we set the "InStructSubElement" flag and recursively process
// the uses so that we know that we're looking at something within the
// current element.
llvm::SaveAndRestore<bool> X(InStructSubElement, true);
collectUses(SEAI, BaseEltNo);
return collectUses(SEAI, BaseEltNo);
}

void ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
bool ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
for (Operand *UI : ABI->getUses()) {
auto *User = UI->getUser();

@@ -340,16 +350,21 @@ void ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
continue;

if (auto project = dyn_cast<ProjectBoxInst>(User)) {
collectUses(project, project->getFieldIndex());
if (!collectUses(project, project->getFieldIndex()))
return false;
continue;
}

// Other uses of the container are considered escapes of the values.
for (unsigned field : indices(ABI->getBoxType()->getLayout()->getFields()))
for (unsigned field :
indices(ABI->getBoxType()->getLayout()->getFields())) {
addElementUses(field,
ABI->getBoxType()->getFieldType(ABI->getModule(), field),
User, DIUseKind::Escape);
}
}

return true;
}

// Returns true when the instruction represents added instrumentation for
@@ -367,7 +382,7 @@ static bool isSanitizerInstrumentation(SILInstruction *Instruction,
return false;
}

void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
assert(Pointer->getType().isAddress() &&
"Walked through the pointer to the value?");
SILType PointeeType = Pointer->getType().getObjectType();
@@ -383,19 +398,22 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {

// struct_element_addr P, #field indexes into the current element.
if (auto *SEAI = dyn_cast<StructElementAddrInst>(User)) {
collectStructElementUses(SEAI, BaseEltNo);
if (!collectStructElementUses(SEAI, BaseEltNo))
return false;
continue;
}

// Instructions that compute a subelement are handled by a helper.
if (auto *TEAI = dyn_cast<TupleElementAddrInst>(User)) {
collectTupleElementUses(TEAI, BaseEltNo);
if (!collectTupleElementUses(TEAI, BaseEltNo))
return false;
continue;
}

// Look through begin_access.
if (auto I = dyn_cast<BeginAccessInst>(User)) {
collectUses(I, BaseEltNo);
if (!collectUses(I, BaseEltNo))
return false;
continue;
}

@@ -439,7 +457,7 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
continue;
}

if (auto *SWI = dyn_cast<StoreWeakInst>(User))
if (auto *SWI = dyn_cast<StoreWeakInst>(User)) {
if (UI->getOperandNumber() == 1) {
DIUseKind Kind;
if (InStructSubElement)
@@ -451,8 +469,9 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
continue;
}
}

if (auto *SUI = dyn_cast<StoreUnownedInst>(User))
if (auto *SUI = dyn_cast<StoreUnownedInst>(User)) {
if (UI->getOperandNumber() == 1) {
DIUseKind Kind;
if (InStructSubElement)
Expand All @@ -464,6 +483,7 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
continue;
}
}

if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
// If this is a copy of a tuple, we should scalarize it so that we don't
@@ -505,7 +525,13 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
// If this is an out-parameter, it is like a store.
unsigned NumIndirectResults = substConv.getNumIndirectSILResults();
if (ArgumentNumber < NumIndirectResults) {
assert(!InStructSubElement && "We're initializing sub-members?");
// We do not support initializing sub-members. This is an old
// restriction from when this code was used by Definite
// Initialization. With proper code review, we can remove this, but for
// now, let's be conservative.
if (InStructSubElement) {
return false;
}
addElementUses(BaseEltNo, PointeeType, User,
DIUseKind::Initialization);
continue;
@@ -550,29 +576,38 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
// be explicitly initialized by a copy_addr or some other use of the
// projected address).
if (auto I = dyn_cast<InitEnumDataAddrInst>(User)) {
assert(!InStructSubElement &&
"init_enum_data_addr shouldn't apply to struct subelements");
// If we are in a struct already, bail. With proper analysis, we should be
// able to do this optimization.
if (InStructSubElement) {
return false;
}

// Keep track of the fact that we're inside of an enum. This informs our
// recursion that tuple stores are not scalarized outside, and that stores
// should not be treated as partial stores.
llvm::SaveAndRestore<bool> X(InEnumSubElement, true);
collectUses(I, BaseEltNo);
if (!collectUses(I, BaseEltNo))
return false;
continue;
}

// init_existential_addr is modeled as an initialization store.
if (isa<InitExistentialAddrInst>(User)) {
assert(!InStructSubElement &&
"init_existential_addr should not apply to struct subelements");
// init_existential_addr should not apply to struct subelements.
if (InStructSubElement) {
return false;
}
Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
BaseEltNo, 1));
continue;
}

// inject_enum_addr is modeled as an initialization store.
if (isa<InjectEnumAddrInst>(User)) {
assert(!InStructSubElement &&
"inject_enum_addr the subelement of a struct unless in a ctor");
// inject_enum_addr should not apply to a struct subelement unless in a ctor.
if (InStructSubElement) {
return false;
}
Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
BaseEltNo, 1));
continue;
@@ -667,16 +702,22 @@ void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
// Now that we've scalarized some stuff, recurse down into the newly created
// element address computations to recursively process it. This can cause
// further scalarization.
for (auto EltPtr : ElementAddrs)
collectTupleElementUses(cast<TupleElementAddrInst>(EltPtr), BaseEltNo);
if (llvm::any_of(ElementAddrs, [&](SILValue V) {
return !collectTupleElementUses(cast<TupleElementAddrInst>(V),
BaseEltNo);
})) {
return false;
}
}

return true;
}

/// collectDIElementUsesFrom - Analyze all uses of the specified allocation
/// instruction (alloc_box, alloc_stack or mark_uninitialized), classifying them
/// and storing the information found into the Uses and Releases lists.
void swift::collectDIElementUsesFrom(
bool swift::collectDIElementUsesFrom(
const DIMemoryObjectInfo &MemoryInfo, SmallVectorImpl<DIMemoryUse> &Uses,
SmallVectorImpl<SILInstruction *> &Releases) {
ElementUseCollector(MemoryInfo, Uses, Releases).collectFrom();
return ElementUseCollector(MemoryInfo, Uses, Releases).collectFrom();
}
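
Both collectors and the public entry point now return bool and are annotated LLVM_NODISCARD (from llvm/Support/Compiler.h), so call sites cannot silently drop a failed collection: the macro typically expands to [[nodiscard]] under C++17 or [[clang::warn_unused_result]] on Clang. The sketch below is illustrative only; collectSomething() and driver() are hypothetical stand-ins, not this patch's API.

#include "llvm/Support/Compiler.h"

// Hypothetical stand-in for the real collectors; reports failure to model an
// unsupported use pattern (assumes the LLVM headers are on the include path).
LLVM_NODISCARD static bool collectSomething() { return false; }

static bool driver() {
  // collectSomething();          // would warn: LLVM_NODISCARD result ignored
  if (!collectSomething())        // intended pattern: bail out, don't assert
    return false;
  return true;
}

int main() { return driver() ? 0 : 1; }
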
13 changes: 8 additions & 5 deletions lib/SILOptimizer/Mandatory/DIMemoryUseCollector.h
@@ -21,12 +21,14 @@
#define SWIFT_SILOPTIMIZER_MANDATORY_DIMEMORYUSECOLLECTOR_H

#include "swift/Basic/LLVM.h"
#include "llvm/ADT/APInt.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SIL/SILType.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/Compiler.h"

namespace swift {
class SILBuilder;

class SILBuilder;

/// DIMemoryObjectInfo - This struct holds information about the memory object
/// being analyzed that is required to correctly break it down into elements.
@@ -187,9 +189,10 @@ struct DIMemoryUse {
/// collectDIElementUsesFrom - Analyze all uses of the specified allocation
/// instruction (alloc_box, alloc_stack or mark_uninitialized), classifying them
/// and storing the information found into the Uses and Releases lists.
void collectDIElementUsesFrom(const DIMemoryObjectInfo &MemoryInfo,
SmallVectorImpl<DIMemoryUse> &Uses,
SmallVectorImpl<SILInstruction *> &Releases);
LLVM_NODISCARD bool
collectDIElementUsesFrom(const DIMemoryObjectInfo &MemoryInfo,
SmallVectorImpl<DIMemoryUse> &Uses,
SmallVectorImpl<SILInstruction *> &Releases);

} // end namespace swift

11 changes: 8 additions & 3 deletions lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
@@ -1372,9 +1372,14 @@ static bool optimizeMemoryAllocations(SILFunction &Fn) {
// Set up the datastructure used to collect the uses of the allocation.
SmallVector<DIMemoryUse, 16> Uses;
SmallVector<SILInstruction*, 4> Releases;

// Walk the use list of the pointer, collecting them.
collectDIElementUsesFrom(MemInfo, Uses, Releases);

// Walk the use list of the pointer, collecting them. If we are not able
// to optimize, skip this value. *NOTE* We may still scalarize values
// inside the value.
if (!collectDIElementUsesFrom(MemInfo, Uses, Releases)) {
++I;
continue;
}

Changed |= AllocOptimize(Alloc, Uses, Releases).doIt();

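
Each former assertion site follows the same conversion as the caller change above: instead of asserting that an unsupported shape (for example, recursive initialization through a struct subelement) never reaches the collector, the collector reports failure and the pass skips that allocation. A condensed sketch of the pattern under hypothetical names (Collector, collectOld, and collectNew are illustrative, not the compiler's types):

#include <cassert>

struct Collector {
  bool InStructSubElement = false;

  // Before: an unsupported input was treated as a programmer error.
  void collectOld() {
    assert(!InStructSubElement && "recursive initialization not supported");
    // ... record uses ...
  }

  // After: an unsupported input makes collection fail; the caller skips the
  // allocation instead of crashing the compiler.
  bool collectNew() {
    if (InStructSubElement)
      return false;
    // ... record uses ...
    return true;
  }
};

int main() {
  Collector C;
  C.InStructSubElement = true;
  return C.collectNew() ? 1 : 0;  // returns 0: collection bailed out cleanly
}
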
45 changes: 44 additions & 1 deletion test/SILOptimizer/predictable_memopt.sil
@@ -3,7 +3,6 @@
import Builtin
import Swift


// CHECK-LABEL: sil @simple_reg_promotion
// CHECK: bb0(%0 : $Int):
// CHECK-NEXT: return %0 : $Int
@@ -838,3 +837,47 @@ bb2:
destroy_addr %1 : $*Builtin.NativeObject
unreachable
}


class K {
init()
}

sil @init_k : $@convention(thin) () -> @out K

struct S {
var k: K
}

// CHECK-LABEL: sil @recursive_struct_destroy_with_apply : $@convention(thin) () -> S {
// CHECK: alloc_stack
// CHECK: } // end sil function 'recursive_struct_destroy_with_apply'
sil @recursive_struct_destroy_with_apply : $@convention(thin) () -> S {
bb0:
%0 = alloc_stack $S
%1 = struct_element_addr %0 : $*S, #S.k
%2 = function_ref @init_k : $@convention(thin) () -> @out K
%3 = apply %2(%1) : $@convention(thin) () -> @out K
%4 = load %0 : $*S
dealloc_stack %0 : $*S
return %4 : $S
}

struct SWithOpt {
var k: Optional<K>
}

// CHECK-LABEL: sil @recursive_struct_destroy_with_enum_init : $@convention(thin) (@owned K) -> @owned SWithOpt {
// CHECK: alloc_stack
// CHECK: } // end sil function 'recursive_struct_destroy_with_enum_init'
sil @recursive_struct_destroy_with_enum_init : $@convention(thin) (@owned K) -> @owned SWithOpt {
bb0(%arg : $K):
%0 = alloc_stack $SWithOpt
%1 = struct_element_addr %0 : $*SWithOpt, #SWithOpt.k
%2 = init_enum_data_addr %1 : $*Optional<K>, #Optional.some!enumelt.1
store %arg to %2 : $*K
inject_enum_addr %1 : $*Optional<K>, #Optional.some!enumelt.1
%4 = load %0 : $*SWithOpt
dealloc_stack %0 : $*SWithOpt
return %4 : $SWithOpt
}

\ No newline at end of file