Skip to content

Commit df1b9b4

Browse files
committed
Rework AccessEnforcementOpts SparseSet.
This makes the intention more clear and sets up a new optimization to merge access scopes.
1 parent 88a2b43 commit df1b9b4

File tree

1 file changed

+73
-47
lines changed

1 file changed

+73
-47
lines changed

lib/SILOptimizer/Transforms/AccessEnforcementOpts.cpp

Lines changed: 73 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,9 @@ class AccessConflictAnalysis {
182182
PostOrderFunctionInfo *PO;
183183
AccessedStorageAnalysis *ASA;
184184

185-
/// Tracks the in-progress accesses at the end of each block.
185+
/// Tracks the in-scope accesses at the end of each block, for the purpose of
186+
/// finding nested conflicts. (Out-of-scope accesses are currently only
187+
/// tracked locally for the purpose of merging access scopes.)
186188
llvm::SmallDenseMap<SILBasicBlock *, DenseAccessSet, 32> blockOutAccess;
187189

188190
Result result;
@@ -220,18 +222,21 @@ class AccessConflictAnalysis {
220222
class SparseAccessSet {
221223
AccessConflictAnalysis::Result &result;
222224

223-
llvm::SmallBitVector bitmask; // Most functions have < 64 accesses.
224-
DenseAccessSet denseVec;
225+
// Mark the in-scope accesses.
226+
// (Most functions have < 64 accesses.)
227+
llvm::SmallBitVector inScopeBitmask;
228+
// Mark potential conflicts on each access since the last begin/end marker.
229+
llvm::SmallBitVector conflictBitmask;
230+
DenseAccessSet denseVec; // Hold all local accesses seen thus far.
225231

226232
public:
227-
/// Internally, the denseVec may contain entries that have been removed from
228-
/// the bitmask. Iteration checks for membership in the bitmask.
229-
class Iterator {
233+
/// Iterate over in-scope, conflict-free accesses.
234+
class NoNestedConflictIterator {
230235
const SparseAccessSet &sparseSet;
231236
DenseAccessSet::const_iterator denseIter;
232237

233238
public:
234-
Iterator(const SparseAccessSet &set)
239+
NoNestedConflictIterator(const SparseAccessSet &set)
235240
: sparseSet(set), denseIter(set.denseVec.begin()) {}
236241

237242
BeginAccessInst *next() {
@@ -240,67 +245,88 @@ class SparseAccessSet {
240245
BeginAccessInst *beginAccess = *denseIter;
241246
++denseIter;
242247
unsigned sparseIndex = sparseSet.result.getAccessIndex(beginAccess);
243-
if (sparseSet.bitmask[sparseIndex])
248+
if (sparseSet.inScopeBitmask[sparseIndex]
249+
&& !sparseSet.conflictBitmask[sparseIndex]) {
244250
return beginAccess;
251+
}
245252
}
246253
return nullptr;
247254
}
248255
};
249256

250257
SparseAccessSet(AccessConflictAnalysis::Result &result)
251-
: result(result), bitmask(result.accessMap.size()) {}
258+
: result(result), inScopeBitmask(result.accessMap.size()),
259+
conflictBitmask(result.accessMap.size()) {}
252260

261+
// All accesses in the given denseVec are presumed to be in-scope and conflict
262+
// free.
253263
SparseAccessSet(const DenseAccessSet &denseVec,
254264
AccessConflictAnalysis::Result &result)
255-
: result(result), bitmask(result.accessMap.size()), denseVec(denseVec) {
265+
: result(result), inScopeBitmask(result.accessMap.size()),
266+
conflictBitmask(result.accessMap.size()), denseVec(denseVec) {
256267
for (BeginAccessInst *beginAccess : denseVec)
257-
bitmask.set(result.getAccessIndex(beginAccess));
268+
inScopeBitmask.set(result.getAccessIndex(beginAccess));
258269
}
259-
260-
bool isEmpty() const {
261-
Iterator iterator(*this);
270+
bool hasConflictFreeAccess() const {
271+
NoNestedConflictIterator iterator(*this);
262272
return iterator.next() == nullptr;
263273
}
264274

265-
bool contains(unsigned index) const { return bitmask[index]; }
275+
bool hasInScopeAccess() const {
276+
return llvm::any_of(denseVec, [this](BeginAccessInst *beginAccess) {
277+
unsigned sparseIndex = result.getAccessIndex(beginAccess);
278+
return inScopeBitmask[sparseIndex];
279+
});
280+
}
266281

267-
// Insert the given BeginAccessInst with its corresponding reachability index.
268-
// Return true if the set was expanded.
269-
bool insert(BeginAccessInst *beginAccess, unsigned index) {
270-
if (bitmask[index])
271-
return false;
282+
bool isInScope(unsigned index) const { return inScopeBitmask[index]; }
272283

273-
bitmask.set(index);
284+
// Insert the given BeginAccessInst with its corresponding reachability index.
285+
// Set the in-scope bit and reset the conflict bit.
286+
bool enterScope(BeginAccessInst *beginAccess, unsigned index) {
287+
assert(!inScopeBitmask[index]
288+
&& "nested access should not be dynamically enforced.");
289+
inScopeBitmask.set(index);
290+
conflictBitmask.reset(index);
274291
denseVec.push_back(beginAccess);
275292
return true;
276293
}
277294

278-
/// Erase an access from this set based on the index provided by its mapped
279-
/// AccessInfo.
280-
///
281-
/// Does not invalidate Iterator.
282-
void erase(unsigned index) { bitmask.reset(index); }
295+
/// End the scope of the given access by clearing its in-scope bit; the
296+
/// conflict bit is left untouched. (The conflict bit marks conflicts seen
297+
/// since the scope was last entered — see enterScope.)
298+
void exitScope(unsigned index) { inScopeBitmask.reset(index); }
283299

284-
bool isEquivalent(const SparseAccessSet &other) const {
285-
return bitmask == other.bitmask;
286-
}
300+
bool seenConflict(unsigned index) const { return conflictBitmask[index]; }
301+
302+
void setConflict(unsigned index) { conflictBitmask.set(index); }
287303

288304
// Only merge accesses that are present on the `other` map. i.e. erase
289305
// all accesses in this map that are not present in `other`.
290-
void merge(const SparseAccessSet &other) { bitmask &= other.bitmask; }
306+
void merge(const SparseAccessSet &other) {
307+
inScopeBitmask &= other.inScopeBitmask;
308+
// Currently only conflict free accesses are preserved across blocks by this
309+
// analysis. Otherwise, taking the union of conflict bits would be valid.
310+
assert(other.conflictBitmask.none());
311+
}
291312

292-
void copyInto(DenseAccessSet &other) {
313+
void copyNoNestedConflictInto(DenseAccessSet &other) {
293314
other.clear();
294-
Iterator iterator(*this);
295-
while (BeginAccessInst *beginAccess = iterator.next()) {
315+
NoNestedConflictIterator iterator(*this);
316+
while (BeginAccessInst *beginAccess = iterator.next())
296317
other.push_back(beginAccess);
297-
}
298318
}
299319

320+
// Dump only the accesses with no conflict up to this point.
300321
void dump() const {
301-
Iterator iterator(*this);
302-
while (BeginAccessInst *beginAccess = iterator.next()) {
322+
for (BeginAccessInst *beginAccess : denseVec) {
323+
unsigned sparseIndex = result.getAccessIndex(beginAccess);
324+
if (conflictBitmask[sparseIndex])
325+
continue;
326+
303327
llvm::dbgs() << *beginAccess << " ";
328+
if (!inScopeBitmask[sparseIndex])
329+
llvm::dbgs() << " [noscope]";
304330
result.getAccessInfo(beginAccess).dump();
305331
}
306332
}
@@ -361,7 +387,7 @@ void AccessConflictAnalysis::identifyBeginAccesses() {
361387
/// conflicts. Erasing from SparseAccessSet does not invalidate any iterators.
362388
static void recordConflict(AccessInfo &info, SparseAccessSet &accessSet) {
363389
info.setSeenNestedConflict();
364-
accessSet.erase(info.getAccessIndex());
390+
accessSet.setConflict(info.getAccessIndex());
365391
}
366392

367393
// Given an "inner" access, check for potential conflicts with any outer access.
@@ -382,7 +408,7 @@ void AccessConflictAnalysis::visitBeginAccess(BeginAccessInst *innerBeginAccess,
382408
const AccessInfo &innerAccess = result.getAccessInfo(innerBeginAccess);
383409
SILAccessKind innerAccessKind = innerBeginAccess->getAccessKind();
384410

385-
SparseAccessSet::Iterator accessIter(accessSet);
411+
SparseAccessSet::NoNestedConflictIterator accessIter(accessSet);
386412
while (BeginAccessInst *outerBeginAccess = accessIter.next()) {
387413
// If both are reads, keep the mapped access.
388414
if (!accessKindMayConflict(innerAccessKind,
@@ -406,7 +432,7 @@ void AccessConflictAnalysis::visitBeginAccess(BeginAccessInst *innerBeginAccess,
406432
// Record the current access in the map. It can potentially be folded
407433
// regardless of whether it may conflict with an outer access.
408434
bool inserted =
409-
accessSet.insert(innerBeginAccess, innerAccess.getAccessIndex());
435+
accessSet.enterScope(innerBeginAccess, innerAccess.getAccessIndex());
410436
(void)inserted;
411437
assert(inserted && "the same begin_access cannot be seen twice.");
412438
}
@@ -418,21 +444,21 @@ void AccessConflictAnalysis::visitEndAccess(EndAccessInst *endAccess,
418444
return;
419445

420446
unsigned index = result.getAccessIndex(beginAccess);
421-
DEBUG(if (accessSet.contains(index)) llvm::dbgs()
447+
DEBUG(if (accessSet.seenConflict(index)) llvm::dbgs()
422448
<< "No conflict on one path from " << *beginAccess << " to "
423449
<< *endAccess);
424450

425451
// Erase this access from the sparse set. We only want to detect conflicts
426452
// within the access scope.
427-
accessSet.erase(index);
453+
accessSet.exitScope(index);
428454
}
429455

430456
void AccessConflictAnalysis::visitFullApply(FullApplySite fullApply,
431457
SparseAccessSet &accessSet) {
432458
FunctionAccessedStorage callSiteAccesses;
433459
ASA->getCallSiteEffects(callSiteAccesses, fullApply);
434460

435-
SparseAccessSet::Iterator accessIter(accessSet);
461+
SparseAccessSet::NoNestedConflictIterator accessIter(accessSet);
436462
while (BeginAccessInst *outerBeginAccess = accessIter.next()) {
437463

438464
// If there is no potential conflict, leave the outer access mapped.
@@ -484,16 +510,16 @@ void AccessConflictAnalysis::visitBlock(SILBasicBlock *BB) {
484510
visitFullApply(fullApply, accessSet);
485511
}
486512
}
487-
DEBUG(if (!accessSet.isEmpty()) {
488-
llvm::dbgs() << "Initializing accesses out of bb" << BB->getDebugID()
489-
<< "\n";
513+
DEBUG(if (accessSet.hasConflictFreeAccess()) {
514+
llvm::dbgs() << "Initializing no-conflict access out of bb"
515+
<< BB->getDebugID() << "\n";
490516
accessSet.dump();
491517
});
492518
if (BB->getTerminator()->isFunctionExiting())
493-
assert(accessSet.isEmpty() && "no postdominating end_access");
519+
assert(!accessSet.hasInScopeAccess() && "no postdominating end_access");
494520

495521
// Initialize blockOutAccess for this block with the current access set.
496-
accessSet.copyInto(blockOutAccess[BB]);
522+
accessSet.copyNoNestedConflictInto(blockOutAccess[BB]);
497523
}
498524

499525
// -----------------------------------------------------------------------------

0 commit comments

Comments
 (0)