@@ -71,7 +71,8 @@ namespace {
 
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
-  static constexpr u16 InvalidEntry = CacheIndexMax;
+  static constexpr scudo::uptr EndOfListVal = CacheIndexMax;
+
   // We allow a certain amount of fragmentation and part of the fragmented bytes
   // will be released by `releaseAndZeroPagesToOS()`. This increases the chance
   // of cache hit rate and reduces the overhead to the RSS at the same time. See
@@ -206,17 +207,16 @@ class MapAllocatorCache {
                       &Fractional);
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     Str->append(
-        "Stats: MapAllocatorCache: EntriesCount: %d, "
+        "Stats: MapAllocatorCache: EntriesCount: %zu, "
         "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
-        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
         atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
+    for (CachedBlock &Entry : LRUEntries) {
       Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
@@ -234,7 +234,7 @@ class MapAllocatorCache {
                 "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(EntriesCount, 0U);
+    DCHECK_EQ(LRUEntries.size(), 0U);
     setOption(Option::MaxCacheEntriesCount,
               static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
     setOption(Option::MaxCacheEntrySize,
@@ -244,17 +244,13 @@ class MapAllocatorCache {
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
-
-    // Available entries will be retrieved starting from the beginning of the
-    // Entries array
-    AvailableHead = 0;
-    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
-      Entries[I].Next = static_cast<u16>(I + 1);
+    LRUEntries.clear();
+    LRUEntries.init(Entries, sizeof(Entries));
 
-    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
+    AvailEntries.clear();
+    AvailEntries.init(Entries, sizeof(Entries));
+    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
+      AvailEntries.push_back(&Entries[I]);
   }
 
   void store(const Options &Options, uptr CommitBase, uptr CommitSize,
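To make the new init() easier to follow, here is a minimal standalone sketch of the idea behind seeding AvailEntries from the fixed Entries array: every slot starts out on a free list and the LRU list starts empty, so a later insert never has to scan the array for an invalid slot. The sketch does not use scudo's list.h; Block, the pointer links, and the array size below are illustrative stand-ins only.

    // Illustrative only: a plain pointer-linked free list over a fixed array,
    // standing in for the index-linked AvailEntries. Not scudo code.
    #include <cstddef>

    namespace sketch {

    struct Block {
      Block *Next = nullptr; // intrusive link used while the slot is unused
    };

    constexpr std::size_t EntriesArraySize = 16;

    struct Cache {
      Block Entries[EntriesArraySize];
      Block *AvailHead = nullptr; // head of the free list of unused slots

      // Equivalent in spirit to AvailEntries.clear() followed by pushing every
      // slot: after init(), each element of Entries is reachable from AvailHead
      // and the LRU side of the cache is empty.
      void init() {
        AvailHead = nullptr;
        for (std::size_t I = EntriesArraySize; I-- > 0;) {
          Entries[I].Next = AvailHead;
          AvailHead = &Entries[I];
        }
      }
    };

    } // namespace sketch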
@@ -329,8 +325,9 @@ class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        CachedBlock *Entry = LRUEntries.back();
+        EvictionMemMaps.push_back(Entry->MemMap);
+        remove(Entry);
       }
 
       insert(Entry);
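The eviction loop above keeps the list manipulation under the mutex and defers the expensive unmap of each evicted mapping until after the lock is released. A simplified standalone sketch of that shape, with ScopedLock, MemMapT, and the intrusive list replaced by standard-library stand-ins:

    // Simplified sketch: collect evicted mappings under the lock, unmap after
    // releasing it. MemMap::unmap() is a placeholder, not the scudo API.
    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct MemMap {
      void unmap() {} // stands in for the real, comparatively slow unmap call
    };

    struct Cache {
      std::mutex Mutex;
      std::vector<MemMap> LRUEntries; // back() = least recently used

      void storeWithEviction(const MemMap &NewEntry, std::size_t MaxEntriesCount) {
        std::vector<MemMap> EvictionMemMaps; // unmapped outside of the lock
        {
          std::scoped_lock L(Mutex);
          // All excess entries are evicted while holding the lock.
          while (LRUEntries.size() >= MaxEntriesCount) {
            EvictionMemMaps.push_back(LRUEntries.back());
            LRUEntries.pop_back();
          }
          LRUEntries.insert(LRUEntries.begin(), NewEntry);
        }
        // The mutex is no longer held while the evicted mappings are torn down.
        for (MemMap &M : EvictionMemMaps)
          M.unmap();
      }
    };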
@@ -360,9 +357,9 @@ class MapAllocatorCache {
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
-      if (EntriesCount == 0)
+      if (LRUEntries.size() == 0)
         return {};
-      u16 RetrievedIndex = CachedBlock::InvalidEntry;
+      CachedBlock *RetrievedEntry = nullptr;
       uptr MinDiff = UINTPTR_MAX;
 
       // Since allocation sizes don't always match cached memory chunk sizes
@@ -382,10 +379,9 @@ class MapAllocatorCache {
       // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
       // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
      // not considered valid for retrieval.
-      for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
+      for (CachedBlock &Entry : LRUEntries) {
+        const uptr CommitBase = Entry.CommitBase;
+        const uptr CommitSize = Entry.CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
         const uptr HeaderPos = AllocPos - HeadersSize;
@@ -408,7 +404,7 @@ class MapAllocatorCache {
           continue;
 
         MinDiff = Diff;
-        RetrievedIndex = I;
+        RetrievedEntry = &Entry;
         EntryHeaderPos = HeaderPos;
 
         // Immediately use a cached block if its size is close enough to the
@@ -418,9 +414,10 @@ class MapAllocatorCache {
         if (Diff <= OptimalFitThesholdBytes)
           break;
       }
-      if (RetrievedIndex != CachedBlock::InvalidEntry) {
-        Entry = Entries[RetrievedIndex];
-        remove(RetrievedIndex);
+
+      if (RetrievedEntry != nullptr) {
+        Entry = *RetrievedEntry;
+        remove(RetrievedEntry);
         SuccessfulRetrieves++;
       }
     }
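The retrieval loop is a best-fit scan with an early exit: it walks LRUEntries from most to least recently used, remembers the entry with the smallest size difference seen so far, and stops once a candidate falls within the optimal-fit threshold. A standalone sketch of that policy, with the header and fragmentation checks of the real code omitted and the types simplified:

    // Illustrative best-fit scan with an early-exit threshold. The names
    // mirror the diff but the types and list are standard-library stand-ins.
    #include <cstdint>
    #include <list>

    struct Block {
      std::uintptr_t CommitBase = 0;
      std::uintptr_t CommitSize = 0;
    };

    // Returns the best-fitting cached block for a request of Size bytes, or
    // nullptr if nothing in the list can hold it.
    Block *findBestFit(std::list<Block> &LRUEntries, std::uintptr_t Size,
                       std::uintptr_t OptimalFitThresholdBytes) {
      Block *Best = nullptr;
      std::uintptr_t MinDiff = UINTPTR_MAX;
      for (Block &Entry : LRUEntries) {
        if (Entry.CommitSize < Size)
          continue; // too small to satisfy the request
        const std::uintptr_t Diff = Entry.CommitSize - Size;
        if (Diff >= MinDiff)
          continue; // not better than the current best fit
        MinDiff = Diff;
        Best = &Entry;
        if (Diff <= OptimalFitThresholdBytes)
          break; // good enough: skip the rest of the list
      }
      return Best;
    }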
@@ -499,9 +496,8 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
+    for (CachedBlock &Entry : LRUEntries) {
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -514,78 +510,33 @@ class MapAllocatorCache {
 
 private:
   bool needToEvict() REQUIRES(Mutex) {
-    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+    return (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
   void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
-    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
-
-    // Cache should be populated with valid entries when not empty
-    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
-
-    u32 FreeIndex = AvailableHead;
-    AvailableHead = Entries[AvailableHead].Next;
-
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
-    EntriesCount++;
+    CachedBlock *FreeEntry = AvailEntries.front();
+    AvailEntries.pop_front();
 
-    // Availability stack should not have available entries when all entries
-    // are in use
-    if (EntriesCount == Config::getEntriesArraySize())
-      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+    *FreeEntry = Entry;
+    LRUEntries.push_front(FreeEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
-    else
-      Entries[Entries[I].Prev].Next = Entries[I].Next;
-
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
-    else
-      Entries[Entries[I].Next].Prev = Entries[I].Prev;
-
-    Entries[I].Next = AvailableHead;
-    AvailableHead = static_cast<u16>(I);
-    EntriesCount--;
-
-    // Cache should not have valid entries when not empty
-    if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
-    }
+  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
+    DCHECK(Entry->isValid());
+    LRUEntries.remove(Entry);
+    Entry->invalidate();
+    AvailEntries.push_front(Entry);
   }
 
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
-      EntriesCount = 0;
+
+      for (CachedBlock &Entry : LRUEntries)
+        MapInfo[N++] = Entry.MemMap;
+      LRUEntries.clear();
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
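With the linked lists, insert() and remove() reduce to a couple of list operations each: insert() takes a free slot from AvailEntries and pushes it to the front of LRUEntries (most recently used), while remove() unlinks an entry from LRUEntries and parks its slot back on AvailEntries. A standalone sketch of that bookkeeping using std::list in place of scudo's intrusive lists (the real code links the CachedBlock objects in place and performs no allocation):

    // Illustrative only: std::list::splice moves a node between the two lists
    // without copying, which is the closest standard-library analogue to the
    // intrusive push_front()/remove() calls above.
    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <list>

    struct Block {
      std::uintptr_t CommitBase = 0;
      std::uintptr_t CommitSize = 0;
    };

    struct LRUCache {
      static constexpr std::size_t MaxEntriesCount = 8;
      std::list<Block> LRUEntries;   // front() = most recently used
      std::list<Block> AvailEntries; // unused slots

      LRUCache() : AvailEntries(MaxEntriesCount) {} // all slots start unused

      // Take a free slot, fill it, and make it the most recently used entry.
      // If no slot is free, evict from the least recently used end first.
      void insert(const Block &Entry) {
        while (AvailEntries.empty())
          remove(std::prev(LRUEntries.end()));
        auto Slot = AvailEntries.begin();
        *Slot = Entry;
        LRUEntries.splice(LRUEntries.begin(), AvailEntries, Slot);
      }

      // Unlink from the LRU list and park the slot on the free list so a
      // later insert() can reuse it.
      void remove(std::list<Block>::iterator Entry) {
        AvailEntries.splice(AvailEntries.begin(), LRUEntries, Entry);
      }
    };

Both operations stay O(1); the refactor mainly trades the hand-rolled index bookkeeping for the shared list implementation.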
@@ -607,7 +558,7 @@ class MapAllocatorCache {
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
@@ -617,7 +568,6 @@ class MapAllocatorCache {
   }
 
   HybridMutex Mutex;
-  u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
@@ -630,12 +580,9 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
-  // The AvailableHead is the top of the stack of available entries
-  u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
+  // The unused Entries
+  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
 };
 
 template <typename Config> class MapAllocator {