Skip to content

[pmo] Eliminate dead flat namespace tuple numbering from PMOMemoryUseCollector. #21645

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
227 changes: 53 additions & 174 deletions lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,22 +23,9 @@
using namespace swift;

//===----------------------------------------------------------------------===//
// PMOMemoryObjectInfo Implementation
// PMOMemoryObjectInfo Implementation
//===----------------------------------------------------------------------===//

/// Return the number of flattened leaf elements in \p T. Tuples are
/// recursively flattened; every non-tuple type counts as a single element.
static unsigned getElementCountRec(SILModule &Module, SILType T) {
  auto TT = T.getAs<TupleType>();

  // Non-tuple types are leaves and contribute exactly one element.
  if (!TT)
    return 1;

  // Tuples are always recursively flattened: sum the flattened counts of
  // every field.
  unsigned Count = 0;
  for (unsigned Idx = 0, End = TT->getNumElements(); Idx != End; ++Idx)
    Count += getElementCountRec(Module, T.getTupleElementType(Idx));
  return Count;
}

PMOMemoryObjectInfo::PMOMemoryObjectInfo(AllocationInst *allocation)
: MemoryInst(allocation) {
auto &module = MemoryInst->getModule();
Expand All @@ -51,90 +38,12 @@ PMOMemoryObjectInfo::PMOMemoryObjectInfo(AllocationInst *allocation)
} else {
MemorySILType = cast<AllocStackInst>(MemoryInst)->getElementType();
}

// Break down the initializer.
NumElements = getElementCountRec(module, MemorySILType);
}

/// Return the first instruction of the entry block of the function that
/// contains the memory allocation.
SILInstruction *PMOMemoryObjectInfo::getFunctionEntryPoint() const {
  auto &EntryBlock = *getFunction().begin();
  return &*EntryBlock.begin();
}

/// Given a symbolic (flattened) element number, return the type of that
/// element, recursing through tuple types.
static SILType getElementTypeRec(SILModule &Module, SILType T, unsigned EltNo) {
  auto TT = T.getAs<TupleType>();

  // A non-tuple type is a leaf with a single element, number 0.
  if (!TT) {
    assert(EltNo == 0);
    return T;
  }

  // Walk the tuple fields, skipping past the flattened element counts of
  // earlier fields until the requested element lands inside one of them.
  unsigned Remaining = EltNo;
  for (unsigned Idx = 0, End = TT->getNumElements(); Idx != End; ++Idx) {
    SILType FieldTy = T.getTupleElementType(Idx);
    unsigned FieldCount = getElementCountRec(Module, FieldTy);
    if (Remaining < FieldCount)
      return getElementTypeRec(Module, FieldTy, Remaining);
    Remaining -= FieldCount;
  }

  // This can only happen if we look at a symbolic element number of an empty
  // tuple.
  llvm::report_fatal_error("invalid element number");
}

/// getElementType - Return the swift type of the specified flattened element
/// of this memory object (delegates to getElementTypeRec).
SILType PMOMemoryObjectInfo::getElementType(unsigned EltNo) const {
  auto &Module = MemoryInst->getModule();
  return getElementTypeRec(Module, MemorySILType, EltNo);
}

/// Push the symbolic path name to the specified element number onto the
/// specified std::string.
///
/// \p EltNo indexes the recursively-flattened leaves of \p T. Each tuple
/// field traversed appends ".<name>" (or ".<index>" for an unnamed field)
/// to \p Result.
static void getPathStringToElementRec(SILModule &Module, SILType T,
                                      unsigned EltNo, std::string &Result) {
  CanTupleType TT = T.getAs<TupleType>();

  // Non-tuple leaves contribute no path component.
  if (!TT) {
    assert(EltNo == 0 && "Element count problem");
    return;
  }

  for (unsigned i = 0, e = TT->getNumElements(); i < e; i++) {
    auto Field = TT->getElement(i);
    SILType FieldTy = T.getTupleElementType(i);
    unsigned NumFieldElements = getElementCountRec(Module, FieldTy);

    if (EltNo < NumFieldElements) {
      Result += '.';
      // Prefer the declared field name; fall back to the field's position.
      // (The original kept a separate FieldNo counter, but it always equaled
      // the loop index, so the redundant local has been removed.)
      if (Field.hasName())
        Result += Field.getName().str();
      else
        Result += llvm::utostr(i);
      return getPathStringToElementRec(Module, FieldTy, EltNo, Result);
    }

    EltNo -= NumFieldElements;
  }
  llvm_unreachable("Element number is out of range for this type!");
}

/// Compute a symbolic, user-facing path string ("<base>.<field>...") naming
/// flattened element \p Element and store it in \p Result.
///
/// The base of the path is the allocation's declared name when one is
/// attached to the instruction's location, otherwise "<unknown>". This
/// implementation always returns nullptr: no ValueDecl is produced for an
/// individual tuple element here.
ValueDecl *
PMOMemoryObjectInfo::getPathStringToElement(unsigned Element,
                                            std::string &Result) const {
  auto &Module = MemoryInst->getModule();

  // Seed the path with the allocation's declared name, if known.
  if (auto *VD = dyn_cast_or_null<ValueDecl>(getLoc().getAsASTNode<Decl>()))
    Result = VD->getBaseName().userFacingName();
  else
    Result = "<unknown>";

  // Get the path through a tuple, if relevant.
  getPathStringToElementRec(Module, MemorySILType, Element, Result);

  // No per-element ValueDecl is available from this routine.
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Scalarization Logic
//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -207,14 +116,11 @@ class ElementUseCollector {
LLVM_NODISCARD bool collectFrom();

private:
LLVM_NODISCARD bool collectUses(SILValue Pointer, unsigned BaseEltNo);
LLVM_NODISCARD bool collectUses(SILValue Pointer);
LLVM_NODISCARD bool collectContainerUses(AllocBoxInst *ABI);
void addElementUses(unsigned BaseEltNo, SILType UseTy, SILInstruction *User,
PMOUseKind Kind);
LLVM_NODISCARD bool collectTupleElementUses(TupleElementAddrInst *TEAI,
unsigned BaseEltNo);
LLVM_NODISCARD bool collectStructElementUses(StructElementAddrInst *SEAI,
unsigned BaseEltNo);
void addElementUses(SILInstruction *User, PMOUseKind Kind);
LLVM_NODISCARD bool collectTupleElementUses(TupleElementAddrInst *TEAI);
LLVM_NODISCARD bool collectStructElementUses(StructElementAddrInst *SEAI);
};
} // end anonymous namespace

Expand All @@ -224,7 +130,7 @@ bool ElementUseCollector::collectFrom() {
if (auto *ABI = TheMemory.getContainer()) {
shouldOptimize = collectContainerUses(ABI);
} else {
shouldOptimize = collectUses(TheMemory.getAddress(), 0);
shouldOptimize = collectUses(TheMemory.getAddress());
}

if (!shouldOptimize)
Expand All @@ -247,51 +153,28 @@ bool ElementUseCollector::collectFrom() {
/// acts on all of the aggregate elements in that value. For example, a load
/// of $*(Int,Int) is a use of both Int elements of the tuple. This is a helper
/// to keep the Uses data structure up to date for aggregate uses.
void ElementUseCollector::addElementUses(unsigned BaseEltNo, SILType UseTy,
SILInstruction *User,
void ElementUseCollector::addElementUses(SILInstruction *User,
PMOUseKind Kind) {
// If we're in a subelement of a struct or enum, just mark the struct, not
// things that come after it in a parent tuple.
unsigned NumElements = 1;
if (TheMemory.NumElements != 1 && !InStructSubElement)
NumElements = getElementCountRec(Module, UseTy);

Uses.push_back(PMOMemoryUse(User, Kind, BaseEltNo, NumElements));
Uses.emplace_back(User, Kind);
}

/// Given a tuple_element_addr or struct_element_addr, compute the new
/// BaseEltNo implicit in the selected member, and recursively add uses of
/// the instruction.
bool ElementUseCollector::collectTupleElementUses(TupleElementAddrInst *TEAI,
unsigned BaseEltNo) {

bool ElementUseCollector::collectTupleElementUses(TupleElementAddrInst *TEAI) {
// If we're walking into a tuple within a struct or enum, don't adjust the
// BaseElt. The uses hanging off the tuple_element_addr are going to be
// counted as uses of the struct or enum itself.
if (InStructSubElement)
return collectUses(TEAI, BaseEltNo);

// tuple_element_addr P, 42 indexes into the current tuple element.
// Recursively process its uses with the adjusted element number.
unsigned FieldNo = TEAI->getFieldNo();
auto T = TEAI->getOperand()->getType();
if (T.is<TupleType>()) {
for (unsigned i = 0; i != FieldNo; ++i) {
SILType EltTy = T.getTupleElementType(i);
BaseEltNo += getElementCountRec(Module, EltTy);
}
}

return collectUses(TEAI, BaseEltNo);
return collectUses(TEAI);
}

bool ElementUseCollector::collectStructElementUses(StructElementAddrInst *SEAI,
unsigned BaseEltNo) {
bool ElementUseCollector::collectStructElementUses(
StructElementAddrInst *SEAI) {
// Generally, we set the "InStructSubElement" flag and recursively process
// the uses so that we know that we're looking at something within the
// current element.
llvm::SaveAndRestore<bool> X(InStructSubElement, true);
return collectUses(SEAI, BaseEltNo);
return collectUses(SEAI);
}

bool ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
Expand All @@ -307,24 +190,23 @@ bool ElementUseCollector::collectContainerUses(AllocBoxInst *ABI) {
continue;

if (auto project = dyn_cast<ProjectBoxInst>(User)) {
if (!collectUses(project, project->getFieldIndex()))
if (!collectUses(project))
return false;
continue;
}

// Other uses of the container are considered escapes of the values.
for (unsigned field :
indices(ABI->getBoxType()->getLayout()->getFields())) {
addElementUses(field,
ABI->getBoxType()->getFieldType(ABI->getModule(), field),
User, PMOUseKind::Escape);
}
// Other uses of the container are considered escapes of the underlying
// value.
//
// This will cause the dataflow to stop propagating any information at the
// use block.
addElementUses(User, PMOUseKind::Escape);
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This part of the change requires a bit of explanation. Previously, PredictableMemOpts was completely ignoring the field and type here, so it was effectively processing the same use multiple times. The net effect is the same as having just one use: stopping the propagation of available values.

}

return true;
}

bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
bool ElementUseCollector::collectUses(SILValue Pointer) {
assert(Pointer->getType().isAddress() &&
"Walked through the pointer to the value?");
SILType PointeeType = Pointer->getType().getObjectType();
Expand All @@ -340,21 +222,21 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {

// struct_element_addr P, #field indexes into the current element.
if (auto *SEAI = dyn_cast<StructElementAddrInst>(User)) {
if (!collectStructElementUses(SEAI, BaseEltNo))
if (!collectStructElementUses(SEAI))
return false;
continue;
}

// Instructions that compute a subelement are handled by a helper.
if (auto *TEAI = dyn_cast<TupleElementAddrInst>(User)) {
if (!collectTupleElementUses(TEAI, BaseEltNo))
if (!collectTupleElementUses(TEAI))
return false;
continue;
}

// Look through begin_access.
if (auto I = dyn_cast<BeginAccessInst>(User)) {
if (!collectUses(I, BaseEltNo))
if (!collectUses(I))
return false;
continue;
}
Expand All @@ -369,15 +251,15 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
if (PointeeType.is<TupleType>())
UsesToScalarize.push_back(User);
else
addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::Load);
addElementUses(User, PMOUseKind::Load);
continue;
}

#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
if (isa<Load##Name##Inst>(User)) { \
Uses.push_back(PMOMemoryUse(User, PMOUseKind::Load, BaseEltNo, 1)); \
continue; \
}
#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
if (isa<Load##Name##Inst>(User)) { \
Uses.emplace_back(User, PMOUseKind::Load); \
continue; \
}
#include "swift/AST/ReferenceStorage.def"

// Stores *to* the allocation are writes.
Expand All @@ -397,24 +279,24 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
else
Kind = PMOUseKind::Initialization;

addElementUses(BaseEltNo, PointeeType, User, Kind);
addElementUses(User, Kind);
continue;
}

#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
if (auto *SI = dyn_cast<Store##Name##Inst>(User)) { \
if (UI->getOperandNumber() == 1) { \
PMOUseKind Kind; \
if (InStructSubElement) \
Kind = PMOUseKind::PartialStore; \
else if (SI->isInitializationOfDest()) \
Kind = PMOUseKind::Initialization; \
else \
Kind = PMOUseKind::Assign; \
Uses.push_back(PMOMemoryUse(User, Kind, BaseEltNo, 1)); \
continue; \
} \
}
#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
if (auto *SI = dyn_cast<Store##Name##Inst>(User)) { \
if (UI->getOperandNumber() == 1) { \
PMOUseKind Kind; \
if (InStructSubElement) \
Kind = PMOUseKind::PartialStore; \
else if (SI->isInitializationOfDest()) \
Kind = PMOUseKind::Initialization; \
else \
Kind = PMOUseKind::Assign; \
Uses.emplace_back(User, Kind); \
continue; \
} \
}
#include "swift/AST/ReferenceStorage.def"

if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
Expand All @@ -439,7 +321,7 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
else
Kind = PMOUseKind::Assign;

addElementUses(BaseEltNo, PointeeType, User, Kind);
addElementUses(User, Kind);
continue;
}

Expand All @@ -464,8 +346,7 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
if (InStructSubElement) {
return false;
}
addElementUses(BaseEltNo, PointeeType, User,
PMOUseKind::Initialization);
addElementUses(User, PMOUseKind::Initialization);
continue;

// Otherwise, adjust the argument index.
Expand All @@ -486,7 +367,7 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
case ParameterConvention::Indirect_In:
case ParameterConvention::Indirect_In_Constant:
case ParameterConvention::Indirect_In_Guaranteed:
addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::IndirectIn);
addElementUses(User, PMOUseKind::IndirectIn);
continue;

// If this is an @inout parameter, it is like both a load and store.
Expand All @@ -496,7 +377,7 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
// mutating method, we model that as an escape of self. If an
// individual sub-member is passed as inout, then we model that as an
// inout use.
addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::InOutUse);
addElementUses(User, PMOUseKind::InOutUse);
continue;
}
}
Expand All @@ -509,15 +390,14 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
if (InStructSubElement) {
return false;
}
Uses.push_back(
PMOMemoryUse(User, PMOUseKind::Initialization, BaseEltNo, 1));
Uses.push_back(PMOMemoryUse(User, PMOUseKind::Initialization));
continue;
}

// open_existential_addr is a use of the protocol value,
// so it is modeled as a load.
if (isa<OpenExistentialAddrInst>(User)) {
Uses.push_back(PMOMemoryUse(User, PMOUseKind::Load, BaseEltNo, 1));
Uses.push_back(PMOMemoryUse(User, PMOUseKind::Load));
// TODO: Is it safe to ignore all uses of the open_existential_addr?
continue;
}
Expand All @@ -538,7 +418,7 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
continue;

// Otherwise, the use is something complicated, it escapes.
addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::Escape);
addElementUses(User, PMOUseKind::Escape);
}

// Now that we've walked all of the immediate uses, scalarize any operations
Expand Down Expand Up @@ -604,8 +484,7 @@ bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
// element address computations to recursively process it. This can cause
// further scalarization.
if (llvm::any_of(ElementAddrs, [&](SILValue V) {
return !collectTupleElementUses(cast<TupleElementAddrInst>(V),
BaseEltNo);
return !collectTupleElementUses(cast<TupleElementAddrInst>(V));
})) {
return false;
}
Expand Down
Loading