@@ -71,7 +71,8 @@ namespace {
 
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
-  static constexpr u16 InvalidEntry = CacheIndexMax;
+  static constexpr u16 EndOfListVal = CacheIndexMax;
+
   // We allow a certain amount of fragmentation and part of the fragmented bytes
   // will be released by `releaseAndZeroPagesToOS()`. This increases the chance
   // of cache hit rate and reduces the overhead to the RSS at the same time. See
@@ -206,17 +207,16 @@ class MapAllocatorCache {
                       &Fractional);
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     Str->append(
-        "Stats: MapAllocatorCache: EntriesCount: %d, "
+        "Stats: MapAllocatorCache: EntriesCount: %zu, "
         "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
-        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
         atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
+    for (CachedBlock &Entry : LRUEntries) {
       Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
@@ -234,7 +234,7 @@ class MapAllocatorCache {
                 "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(EntriesCount, 0U);
+    DCHECK_EQ(LRUEntries.size(), 0U);
     setOption(Option::MaxCacheEntriesCount,
               static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
     setOption(Option::MaxCacheEntrySize,
@@ -244,17 +244,13 @@ class MapAllocatorCache {
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
-
-    // Available entries will be retrieved starting from the beginning of the
-    // Entries array
-    AvailableHead = 0;
-    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
-      Entries[I].Next = static_cast<u16>(I + 1);
+    LRUEntries.clear();
+    LRUEntries.init(Entries, sizeof(Entries));
 
-    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
+    AvailEntries.clear();
+    AvailEntries.init(Entries, sizeof(Entries));
+    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
+      AvailEntries.push_back(&Entries[I]);
   }
 
   void store(const Options &Options, uptr CommitBase, uptr CommitSize,
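The init() change above swaps the hand-maintained index chain (LRUHead/LRUTail/AvailableHead plus per-entry Next links) for two intrusive lists that share the same fixed Entries array: LRUEntries keeps cached blocks in most- to least-recently-used order, and AvailEntries is a free list of the unused slots, so initialization reduces to clearing both lists and pushing every slot onto the free list. A minimal standalone sketch of that setup, using plain pointers instead of scudo's list types (Slot, FixedCache and NextAvail are illustrative names, not part of the patch):

#include <cstddef>

// Illustrative only: a fixed slot array whose unused elements sit on a
// singly linked free list, mirroring the AvailEntries setup above.
struct Slot {
  Slot *NextAvail = nullptr;
  std::size_t CommitBase = 0;
  std::size_t CommitSize = 0;
};

template <std::size_t N> struct FixedCache {
  Slot Slots[N] = {};
  Slot *AvailHead = nullptr; // head of the free list of unused slots

  void init() {
    AvailHead = nullptr;
    // Every slot starts out on the free list; the cache itself is empty.
    // Iterating backwards keeps Slots[0] at the head, like push_back would.
    for (std::size_t I = N; I > 0; I--) {
      Slots[I - 1].NextAvail = AvailHead;
      AvailHead = &Slots[I - 1];
    }
  }
};

int main() {
  FixedCache<8> C;
  C.init(); // all 8 slots are now available, none hold cached blocks
  return C.AvailHead == &C.Slots[0] ? 0 : 1;
}
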
@@ -326,11 +322,15 @@ class MapAllocatorCache {
         Entry = PrevEntry;
       }
 
-      // All excess entries are evicted from the cache
-      while (needToEvict()) {
+      // All excess entries are evicted from the cache. Note that when
+      // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's
+      // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we
+      // won't try to pop `LRUEntries` when it's empty.
+      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        CachedBlock *Entry = LRUEntries.back();
+        EvictionMemMaps.push_back(Entry->MemMap);
+        remove(Entry);
       }
 
       insert(Entry);
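The rewritten eviction loop keys off LRUEntries.size() rather than a separate EntriesCount, always evicts from the back of the LRU list (the least recently used block), and stashes each victim's MemMap so the actual unmap can run after the lock is released; as the new comment notes, the caller guarantees MaxEntriesCount is nonzero here, otherwise the loop would try to pop an empty list. A rough sketch of the same control flow, with std::list standing in for the intrusive list and Block/MaxEntries made up for the example:

#include <cstdint>
#include <list>
#include <vector>

struct Block {
  uintptr_t CommitBase = 0;
  uintptr_t CommitSize = 0;
};

// Evict from the least-recently-used end until there is room for one more
// entry; collect the victims so expensive cleanup can run outside the lock.
void evictForStore(std::list<Block> &LRU, size_t MaxEntries,
                   std::vector<Block> &Evicted) {
  while (LRU.size() >= MaxEntries) {
    Evicted.push_back(LRU.back()); // least recently used lives at the back
    LRU.pop_back();
  }
}

int main() {
  std::list<Block> LRU = {{0x1000, 4096}, {0x2000, 4096}, {0x3000, 4096}};
  std::vector<Block> Evicted;
  evictForStore(LRU, /*MaxEntries=*/2, Evicted); // evicts until size < 2
  return Evicted.size() == 2 ? 0 : 1;
}
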
@@ -360,9 +360,9 @@ class MapAllocatorCache {
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
-      if (EntriesCount == 0)
+      if (LRUEntries.size() == 0)
         return {};
-      u16 RetrievedIndex = CachedBlock::InvalidEntry;
+      CachedBlock *RetrievedEntry = nullptr;
       uptr MinDiff = UINTPTR_MAX;
 
       // Since allocation sizes don't always match cached memory chunk sizes
@@ -382,10 +382,9 @@ class MapAllocatorCache {
       // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
       // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
      // not considered valid for retrieval.
-      for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
+      for (CachedBlock &Entry : LRUEntries) {
+        const uptr CommitBase = Entry.CommitBase;
+        const uptr CommitSize = Entry.CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
         const uptr HeaderPos = AllocPos - HeadersSize;
@@ -408,7 +407,7 @@ class MapAllocatorCache {
           continue;
 
         MinDiff = Diff;
-        RetrievedIndex = I;
+        RetrievedEntry = &Entry;
         EntryHeaderPos = HeaderPos;
 
         // Immediately use a cached block if its size is close enough to the
@@ -418,9 +417,10 @@ class MapAllocatorCache {
         if (Diff <= OptimalFitThesholdBytes)
           break;
       }
-      if (RetrievedIndex != CachedBlock::InvalidEntry) {
-        Entry = Entries[RetrievedIndex];
-        remove(RetrievedIndex);
+
+      if (RetrievedEntry != nullptr) {
+        Entry = *RetrievedEntry;
+        remove(RetrievedEntry);
         SuccessfulRetrieves++;
       }
     }
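retrieve() now walks LRUEntries directly and remembers a pointer to the best candidate instead of an array index: the scan keeps the entry that wastes the least space for the requested size and stops early once the waste falls below an optimal-fit threshold. A simplified sketch of that selection loop (std::list, Block and the waste computation are illustrative; the real code also accounts for alignment, header placement, and the fragmentation limit):

#include <cstdint>
#include <list>

struct Block {
  uintptr_t CommitBase = 0;
  uintptr_t CommitSize = 0;
};

// Pick the cached block that wastes the least space for a request of Size
// bytes, stopping early once the waste drops below OptimalFitThreshold.
Block *findBestFit(std::list<Block> &LRU, uintptr_t Size,
                   uintptr_t OptimalFitThreshold) {
  Block *Best = nullptr;
  uintptr_t MinDiff = UINTPTR_MAX;
  for (Block &Entry : LRU) {
    if (Entry.CommitSize < Size)
      continue; // too small to satisfy the request
    const uintptr_t Diff = Entry.CommitSize - Size;
    if (Diff >= MinDiff)
      continue; // not better than the best candidate so far
    MinDiff = Diff;
    Best = &Entry;
    if (Diff <= OptimalFitThreshold)
      break; // close enough: stop scanning the rest of the list
  }
  return Best;
}

int main() {
  std::list<Block> LRU = {{0x1000, 8192}, {0x2000, 4096}, {0x3000, 16384}};
  Block *B = findBestFit(LRU, 4000, /*OptimalFitThreshold=*/512);
  return (B != nullptr && B->CommitSize == 4096) ? 0 : 1;
}
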
@@ -499,10 +499,8 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    for (CachedBlock &Entry : LRUEntries)
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
     QuarantinePos = -1U;
   }
@@ -513,79 +511,30 @@ class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
-  bool needToEvict() REQUIRES(Mutex) {
-    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
-  }
-
   void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
-    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
-
-    // Cache should be populated with valid entries when not empty
-    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
-
-    u32 FreeIndex = AvailableHead;
-    AvailableHead = Entries[AvailableHead].Next;
-
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
+    CachedBlock *AvailEntry = AvailEntries.front();
+    AvailEntries.pop_front();
 
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
-    EntriesCount++;
-
-    // Availability stack should not have available entries when all entries
-    // are in use
-    if (EntriesCount == Config::getEntriesArraySize())
-      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+    *AvailEntry = Entry;
+    LRUEntries.push_front(AvailEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
-    else
-      Entries[Entries[I].Prev].Next = Entries[I].Next;
-
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
-    else
-      Entries[Entries[I].Next].Prev = Entries[I].Prev;
-
-    Entries[I].Next = AvailableHead;
-    AvailableHead = static_cast<u16>(I);
-    EntriesCount--;
-
-    // Cache should not have valid entries when not empty
-    if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
-    }
+  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
+    DCHECK(Entry->isValid());
+    LRUEntries.remove(Entry);
+    Entry->invalidate();
+    AvailEntries.push_front(Entry);
   }
 
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
-      EntriesCount = 0;
+
+      for (CachedBlock &Entry : LRUEntries)
+        MapInfo[N++] = Entry.MemMap;
+      LRUEntries.clear();
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
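With the intrusive lists, insert() and remove() collapse to a few constant-time list operations: insert() pops a free slot off AvailEntries, copies the new entry into it, and pushes it to the front of LRUEntries; remove() unlinks the entry from LRUEntries, invalidates it, and returns the slot to the free list; empty() simply drains LRUEntries. A compact sketch of the same slot-recycling idea, with two std::lists of slot indices standing in for the intrusive lists (MiniCache and its members are illustrative only):

#include <array>
#include <cstddef>
#include <list>

struct Block {
  bool Valid = false;
  std::size_t CommitBase = 0;
};

// Illustrative only: Slots plays the role of Entries[], Avail the role of
// AvailEntries, and LRU the role of LRUEntries (front = most recent).
struct MiniCache {
  std::array<Block, 4> Slots{};
  std::list<std::size_t> Avail{0, 1, 2, 3}; // free slot indices
  std::list<std::size_t> LRU;               // indices of live entries

  void insert(const Block &Entry) {
    std::size_t I = Avail.front(); // grab an unused slot...
    Avail.pop_front();
    Slots[I] = Entry;              // ...copy the entry into it...
    Slots[I].Valid = true;
    LRU.push_front(I);             // ...and make it the most recent
  }

  void remove(std::size_t I) {
    LRU.remove(I);                 // unlink from the LRU order
    Slots[I].Valid = false;        // invalidate the cached data
    Avail.push_front(I);           // recycle the slot
  }
};

int main() {
  MiniCache C;
  C.insert({false, 0x1000});
  C.insert({false, 0x2000});
  C.remove(C.LRU.back()); // drop the least recently used entry
  return (C.LRU.size() == 1 && C.Avail.size() == 3) ? 0 : 1;
}
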
@@ -607,7 +556,7 @@ class MapAllocatorCache {
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
@@ -617,7 +566,6 @@ class MapAllocatorCache {
   }
 
   HybridMutex Mutex;
-  u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
@@ -630,12 +578,10 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
-  // The AvailableHead is the top of the stack of available entries
-  u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  // Cached blocks stored in LRU order
+  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
+  // The unused Entries
+  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
 };
 
 template <typename Config> class MapAllocator {