@@ -182,7 +182,9 @@ class AccessConflictAnalysis {
182
182
PostOrderFunctionInfo *PO;
183
183
AccessedStorageAnalysis *ASA;
184
184
185
- // / Tracks the in-progress accesses at the end of each block.
185
+ // / Tracks the in-scope accesses at the end of each block, for the purpose of
186
+ // / finding nested conflicts. (Out-of-scope accesses are currently only
187
+ // / tracked locally for the purpose of merging access scopes.)
186
188
llvm::SmallDenseMap<SILBasicBlock *, DenseAccessSet, 32 > blockOutAccess;
187
189
188
190
Result result;
@@ -220,18 +222,21 @@ class AccessConflictAnalysis {
220
222
class SparseAccessSet {
221
223
AccessConflictAnalysis::Result &result;
222
224
223
- llvm::SmallBitVector bitmask; // Most functions have < 64 accesses.
224
- DenseAccessSet denseVec;
225
+ // Mark the in-scope accesses.
226
+ // (Most functions have < 64 accesses.)
227
+ llvm::SmallBitVector inScopeBitmask;
228
+ // Mark potential conflicts on each access since the last begin/end marker.
229
+ llvm::SmallBitVector conflictBitmask;
230
+ DenseAccessSet denseVec; // Holds all local accesses seen thus far.
225
231
226
232
public:
227
- // / Internally, the denseVec may contain entries that have been removed from
228
- // / the bitmask. Iteration checks for membership in the bitmask.
229
- class Iterator {
233
+ // / Iterate over in-scope, conflict-free accesses.
234
+ class NoNestedConflictIterator {
230
235
const SparseAccessSet &sparseSet;
231
236
DenseAccessSet::const_iterator denseIter;
232
237
233
238
public:
234
- Iterator (const SparseAccessSet &set)
239
+ NoNestedConflictIterator (const SparseAccessSet &set)
235
240
: sparseSet(set), denseIter(set.denseVec.begin()) {}
236
241
237
242
BeginAccessInst *next () {
@@ -240,67 +245,88 @@ class SparseAccessSet {
240
245
BeginAccessInst *beginAccess = *denseIter;
241
246
++denseIter;
242
247
unsigned sparseIndex = sparseSet.result .getAccessIndex (beginAccess);
243
- if (sparseSet.bitmask [sparseIndex])
248
+ if (sparseSet.inScopeBitmask [sparseIndex]
249
+ && !sparseSet.conflictBitmask [sparseIndex]) {
244
250
return beginAccess;
251
+ }
245
252
}
246
253
return nullptr ;
247
254
}
248
255
};
249
256
250
257
SparseAccessSet (AccessConflictAnalysis::Result &result)
251
- : result(result), bitmask(result.accessMap.size()) {}
258
+ : result(result), inScopeBitmask(result.accessMap.size()),
259
+ conflictBitmask (result.accessMap.size()) {}
252
260
261
+ // All accesses in the given denseVec are presumed to be in-scope and conflict
262
+ // free.
253
263
SparseAccessSet (const DenseAccessSet &denseVec,
254
264
AccessConflictAnalysis::Result &result)
255
- : result(result), bitmask(result.accessMap.size()), denseVec(denseVec) {
265
+ : result(result), inScopeBitmask(result.accessMap.size()),
266
+ conflictBitmask(result.accessMap.size()), denseVec(denseVec) {
256
267
for (BeginAccessInst *beginAccess : denseVec)
257
- bitmask .set (result.getAccessIndex (beginAccess));
268
+ inScopeBitmask .set (result.getAccessIndex (beginAccess));
258
269
}
259
-
260
- bool isEmpty () const {
261
- Iterator iterator (*this );
270
+ bool hasConflictFreeAccess () const {
271
+ NoNestedConflictIterator iterator (*this );
262
272
return iterator.next () == nullptr ;
263
273
}
264
274
265
- bool contains (unsigned index) const { return bitmask[index]; }
275
+ bool hasInScopeAccess () const {
276
+ return llvm::any_of (denseVec, [this ](BeginAccessInst *beginAccess) {
277
+ unsigned sparseIndex = result.getAccessIndex (beginAccess);
278
+ return inScopeBitmask[sparseIndex];
279
+ });
280
+ }
266
281
267
- // Insert the given BeginAccessInst with its corresponding reachability index.
268
- // Return true if the set was expanded.
269
- bool insert (BeginAccessInst *beginAccess, unsigned index) {
270
- if (bitmask[index])
271
- return false ;
282
+ bool isInScope (unsigned index) const { return inScopeBitmask[index]; }
272
283
273
- bitmask.set (index);
284
+ // Insert the given BeginAccessInst with its corresponding reachability index.
285
+ // Set the in-scope bit and reset the conflict bit.
286
+ bool enterScope (BeginAccessInst *beginAccess, unsigned index) {
287
+ assert (!inScopeBitmask[index]
288
+ && " nested access should not be dynamically enforced." );
289
+ inScopeBitmask.set (index);
290
+ conflictBitmask.reset (index);
274
291
denseVec.push_back (beginAccess);
275
292
return true ;
276
293
}
277
294
278
- // / Erase an access from this set based on the index provided by its mapped
279
- // / AccessInfo.
280
- // /
281
- // / Does not invalidate Iterator.
282
- void erase (unsigned index) { bitmask.reset (index); }
295
+ // / End the scope of the given access by clearing its in-scope bit. (The
296
+ // / conflict bit is intentionally left untouched here; it is only reset when
297
+ // / the access scope is next entered.)
298
+ void exitScope (unsigned index) { inScopeBitmask.reset (index); }
283
299
284
- bool isEquivalent ( const SparseAccessSet &other ) const {
285
- return bitmask == other. bitmask ;
286
- }
300
+ bool seenConflict ( unsigned index ) const { return conflictBitmask[index]; }
301
+
302
+ void setConflict ( unsigned index) { conflictBitmask. set (index); }
287
303
288
304
// Only merge accesses that are present on the `other` map. i.e. erase
289
305
// all accesses in this map that are not present in `other`.
290
- void merge (const SparseAccessSet &other) { bitmask &= other.bitmask ; }
306
+ void merge (const SparseAccessSet &other) {
307
+ inScopeBitmask &= other.inScopeBitmask ;
308
+ // Currently only conflict free accesses are preserved across blocks by this
309
+ // analysis. Otherwise, taking the union of conflict bits would be valid.
310
+ assert (other.conflictBitmask .none ());
311
+ }
291
312
292
- void copyInto (DenseAccessSet &other) {
313
+ void copyNoNestedConflictInto (DenseAccessSet &other) {
293
314
other.clear ();
294
- Iterator iterator (*this );
295
- while (BeginAccessInst *beginAccess = iterator.next ()) {
315
+ NoNestedConflictIterator iterator (*this );
316
+ while (BeginAccessInst *beginAccess = iterator.next ())
296
317
other.push_back (beginAccess);
297
- }
298
318
}
299
319
320
+ // Dump only the accesses with no conflict up to this point.
300
321
void dump () const {
301
- Iterator iterator (*this );
302
- while (BeginAccessInst *beginAccess = iterator.next ()) {
322
+ for (BeginAccessInst *beginAccess : denseVec) {
323
+ unsigned sparseIndex = result.getAccessIndex (beginAccess);
324
+ if (conflictBitmask[sparseIndex])
325
+ continue ;
326
+
303
327
llvm::dbgs () << *beginAccess << " " ;
328
+ if (!inScopeBitmask[sparseIndex])
329
+ llvm::dbgs () << " [noscope]" ;
304
330
result.getAccessInfo (beginAccess).dump ();
305
331
}
306
332
}
@@ -361,7 +387,7 @@ void AccessConflictAnalysis::identifyBeginAccesses() {
361
387
// / conflicts. Erasing from SparseAccessSet does not invalidate any iterators.
362
388
static void recordConflict (AccessInfo &info, SparseAccessSet &accessSet) {
363
389
info.setSeenNestedConflict ();
364
- accessSet.erase (info.getAccessIndex ());
390
+ accessSet.setConflict (info.getAccessIndex ());
365
391
}
366
392
367
393
// Given an "inner" access, check for potential conflicts with any outer access.
@@ -382,7 +408,7 @@ void AccessConflictAnalysis::visitBeginAccess(BeginAccessInst *innerBeginAccess,
382
408
const AccessInfo &innerAccess = result.getAccessInfo (innerBeginAccess);
383
409
SILAccessKind innerAccessKind = innerBeginAccess->getAccessKind ();
384
410
385
- SparseAccessSet::Iterator accessIter (accessSet);
411
+ SparseAccessSet::NoNestedConflictIterator accessIter (accessSet);
386
412
while (BeginAccessInst *outerBeginAccess = accessIter.next ()) {
387
413
// If both are reads, keep the mapped access.
388
414
if (!accessKindMayConflict (innerAccessKind,
@@ -406,7 +432,7 @@ void AccessConflictAnalysis::visitBeginAccess(BeginAccessInst *innerBeginAccess,
406
432
// Record the current access in the map. It can potentially be folded
407
433
// regardless of whether it may conflict with an outer access.
408
434
bool inserted =
409
- accessSet.insert (innerBeginAccess, innerAccess.getAccessIndex ());
435
+ accessSet.enterScope (innerBeginAccess, innerAccess.getAccessIndex ());
410
436
(void )inserted;
411
437
assert (inserted && " the same begin_access cannot be seen twice." );
412
438
}
@@ -418,21 +444,21 @@ void AccessConflictAnalysis::visitEndAccess(EndAccessInst *endAccess,
418
444
return ;
419
445
420
446
unsigned index = result.getAccessIndex (beginAccess);
421
- DEBUG (if (accessSet.contains (index)) llvm::dbgs ()
447
+ DEBUG (if (accessSet.seenConflict (index)) llvm::dbgs ()
422
448
<< " No conflict on one path from " << *beginAccess << " to "
423
449
<< *endAccess);
424
450
425
451
// Erase this access from the sparse set. We only want to detect conflicts
426
452
// within the access scope.
427
- accessSet.erase (index);
453
+ accessSet.exitScope (index);
428
454
}
429
455
430
456
void AccessConflictAnalysis::visitFullApply (FullApplySite fullApply,
431
457
SparseAccessSet &accessSet) {
432
458
FunctionAccessedStorage callSiteAccesses;
433
459
ASA->getCallSiteEffects (callSiteAccesses, fullApply);
434
460
435
- SparseAccessSet::Iterator accessIter (accessSet);
461
+ SparseAccessSet::NoNestedConflictIterator accessIter (accessSet);
436
462
while (BeginAccessInst *outerBeginAccess = accessIter.next ()) {
437
463
438
464
// If there is no potential conflict, leave the outer access mapped.
@@ -484,16 +510,16 @@ void AccessConflictAnalysis::visitBlock(SILBasicBlock *BB) {
484
510
visitFullApply (fullApply, accessSet);
485
511
}
486
512
}
487
- DEBUG (if (! accessSet.isEmpty ()) {
488
- llvm::dbgs () << " Initializing accesses out of bb" << BB-> getDebugID ()
489
- << " \n " ;
513
+ DEBUG (if (accessSet.hasConflictFreeAccess ()) {
514
+ llvm::dbgs () << " Initializing no-conflict access out of bb"
515
+ << BB-> getDebugID () << " \n " ;
490
516
accessSet.dump ();
491
517
});
492
518
if (BB->getTerminator ()->isFunctionExiting ())
493
- assert (accessSet.isEmpty () && " no postdominating end_access" );
519
+ assert (! accessSet.hasInScopeAccess () && " no postdominating end_access" );
494
520
495
521
// Initialize blockOutAccess for this block with the current access set.
496
- accessSet.copyInto (blockOutAccess[BB]);
522
+ accessSet.copyNoNestedConflictInto (blockOutAccess[BB]);
497
523
}
498
524
499
525
// -----------------------------------------------------------------------------
0 commit comments