
Commit 9c5217c

[scudo] Use internal list to manage the LRU cache (#117946)
1 parent 9302043, commit 9c5217c

File tree: 2 files changed (+53, -106 lines)


compiler-rt/lib/scudo/standalone/list.h

Lines changed: 3 additions & 2 deletions
@@ -61,10 +61,11 @@ template <class T> class LinkOp<T, /*LinkWithPtr=*/false> {
   using LinkTy = decltype(T::Next);
 
   LinkOp() = default;
-  LinkOp(T *BaseT, uptr BaseSize) : Base(BaseT), Size(BaseSize) {}
+  // TODO: Check if the `BaseSize` can fit in `Size`.
+  LinkOp(T *BaseT, uptr BaseSize)
+      : Base(BaseT), Size(static_cast<LinkTy>(BaseSize)) {}
   void init(T *LinkBase, uptr BaseSize) {
     Base = LinkBase;
-    // TODO: Check if the `BaseSize` can fit in `Size`.
     Size = static_cast<LinkTy>(BaseSize);
   }
   T *getBase() const { return Base; }
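The constructor now performs the same `static_cast<LinkTy>` narrowing as `init()`, with the range-check TODO moved next to it. The narrowing exists because, in this specialization of `LinkOp`, links are small integers into a base array rather than pointers. Below is a minimal, self-contained sketch of that idea; it is illustrative only, and the names `Node`, `IndexList`, and `EndOfList` are hypothetical stand-ins, not scudo's API.

#include <cassert>
#include <cstddef>
#include <cstdint>

// A node links to its successor by array index, not by pointer.
struct Node {
  uint16_t Next; // index of the next node, or EndOfList
  int Payload;
};

constexpr uint16_t EndOfList = UINT16_MAX;

class IndexList {
public:
  // BaseSize arrives as size_t but is stored in a narrow type, hence the
  // cast; the assert marks where the TODO's range check would live.
  void init(Node *LinkBase, size_t BaseSize) {
    assert(BaseSize <= EndOfList);
    Base = LinkBase;
    Size = static_cast<uint16_t>(BaseSize);
    Head = EndOfList;
  }
  void push_front(Node *X) {
    X->Next = Head;
    Head = static_cast<uint16_t>(X - Base);
  }
  Node *front() { return Head == EndOfList ? nullptr : &Base[Head]; }

private:
  Node *Base = nullptr;
  uint16_t Size = 0; // kept so debug builds could bounds-check indices
  uint16_t Head = EndOfList;
};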

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 50 additions & 104 deletions
@@ -71,7 +71,8 @@ namespace {
 
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
-  static constexpr u16 InvalidEntry = CacheIndexMax;
+  static constexpr u16 EndOfListVal = CacheIndexMax;
+
   // We allow a certain amount of fragmentation and part of the fragmented bytes
   // will be released by `releaseAndZeroPagesToOS()`. This increases the chance
   // of cache hit rate and reduces the overhead to the RSS at the same time. See
@@ -206,17 +207,16 @@ class MapAllocatorCache {
                       &Fractional);
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     Str->append(
-        "Stats: MapAllocatorCache: EntriesCount: %d, "
+        "Stats: MapAllocatorCache: EntriesCount: %zu, "
         "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
-        EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
         atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
+    for (CachedBlock &Entry : LRUEntries) {
       Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
@@ -234,7 +234,7 @@ class MapAllocatorCache {
                 "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK_EQ(EntriesCount, 0U);
+    DCHECK_EQ(LRUEntries.size(), 0U);
     setOption(Option::MaxCacheEntriesCount,
               static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
     setOption(Option::MaxCacheEntrySize,
@@ -244,17 +244,13 @@ class MapAllocatorCache {
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
-
-    // Available entries will be retrieved starting from the beginning of the
-    // Entries array
-    AvailableHead = 0;
-    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
-      Entries[I].Next = static_cast<u16>(I + 1);
+    LRUEntries.clear();
+    LRUEntries.init(Entries, sizeof(Entries));
 
-    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
+    AvailEntries.clear();
+    AvailEntries.init(Entries, sizeof(Entries));
+    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
+      AvailEntries.push_back(&Entries[I]);
   }
 
   void store(const Options &Options, uptr CommitBase, uptr CommitSize,
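With the hand-rolled head/tail indices gone, init() now seeds two lists over the same Entries array: LRUEntries starts empty and AvailEntries receives every slot. A rough sketch of that ownership model follows; std::list of pointers is used purely for illustration where scudo uses its own intrusive lists, and `Block`, `Cache`, `LruList`, and `FreeList` are hypothetical stand-ins.

#include <cstddef>
#include <list>

struct Block {
  bool Valid = false; // stand-in for CachedBlock::isValid()
  int MemMap = 0;     // placeholder handle for the cached mapping
};

constexpr size_t EntriesArraySize = 16; // assumed value for the sketch

struct Cache {
  Block Entries[EntriesArraySize];
  std::list<Block *> LruList;  // plays the role of LRUEntries
  std::list<Block *> FreeList; // plays the role of AvailEntries

  void init() {
    LruList.clear();
    FreeList.clear();
    // Every slot starts out unused, mirroring the AvailEntries loop above.
    for (size_t I = 0; I < EntriesArraySize; I++)
      FreeList.push_back(&Entries[I]);
  }
};

The invariant established here is that each slot of Entries sits on exactly one of the two lists; the rest of the patch preserves it as entries move between them.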
@@ -326,11 +322,15 @@ class MapAllocatorCache {
         Entry = PrevEntry;
       }
 
-      // All excess entries are evicted from the cache
-      while (needToEvict()) {
+      // All excess entries are evicted from the cache. Note that when
+      // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's
+      // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we
+      // won't try to pop `LRUEntries` when it's empty.
+      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        CachedBlock *Entry = LRUEntries.back();
+        EvictionMemMaps.push_back(Entry->MemMap);
+        remove(Entry);
       }
 
       insert(Entry);
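Eviction now pops from the back of the LRU list instead of chasing the LRUTail index. Below is a compressed sketch of the loop's effect; std::list again stands in for the intrusive lists, and MaxEntriesCount is assumed to be non-zero, which (per the new comment) the DCHECK in store() guarantees in the real code.

#include <cstddef>
#include <list>
#include <vector>

struct Block {
  int MemMap = 0; // placeholder handle for the cached mapping
};

// Evict least-recently-used entries until the cache is under its cap,
// collecting their mappings so they can be unmapped outside of the lock.
void evictExcess(std::list<Block *> &LruList, std::list<Block *> &FreeList,
                 std::vector<int> &EvictionMemMaps, size_t MaxEntriesCount) {
  // Precondition mirrored from store(): MaxEntriesCount > 0, so the loop
  // never tries to pop an empty LRU list.
  while (LruList.size() >= MaxEntriesCount) {
    Block *Entry = LruList.back(); // least recently used
    EvictionMemMaps.push_back(Entry->MemMap);
    LruList.pop_back();            // remove(Entry): off the LRU list...
    FreeList.push_front(Entry);    // ...and back onto the free list
  }
}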
@@ -360,9 +360,9 @@ class MapAllocatorCache {
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
-      if (EntriesCount == 0)
+      if (LRUEntries.size() == 0)
         return {};
-      u16 RetrievedIndex = CachedBlock::InvalidEntry;
+      CachedBlock *RetrievedEntry = nullptr;
       uptr MinDiff = UINTPTR_MAX;
 
       // Since allocation sizes don't always match cached memory chunk sizes
@@ -382,10 +382,9 @@ class MapAllocatorCache {
       // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
      // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
       // not considered valid for retrieval.
-      for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
+      for (CachedBlock &Entry : LRUEntries) {
+        const uptr CommitBase = Entry.CommitBase;
+        const uptr CommitSize = Entry.CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
         const uptr HeaderPos = AllocPos - HeadersSize;
@@ -408,7 +407,7 @@ class MapAllocatorCache {
           continue;
 
         MinDiff = Diff;
-        RetrievedIndex = I;
+        RetrievedEntry = &Entry;
         EntryHeaderPos = HeaderPos;
 
         // Immediately use a cached block if its size is close enough to the
@@ -418,9 +417,10 @@ class MapAllocatorCache {
         if (Diff <= OptimalFitThesholdBytes)
           break;
       }
-      if (RetrievedIndex != CachedBlock::InvalidEntry) {
-        Entry = Entries[RetrievedIndex];
-        remove(RetrievedIndex);
+
+      if (RetrievedEntry != nullptr) {
+        Entry = *RetrievedEntry;
+        remove(RetrievedEntry);
         SuccessfulRetrieves++;
       }
     }
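The retrieval scan is unchanged in spirit: walk the LRU list, keep the entry with the smallest size difference, and bail out early once the fit is within the optimal-fit threshold; only the bookkeeping switched from a u16 index to a CachedBlock pointer. The sketch below is a simplified version of that search under stated assumptions; it ignores alignment, header placement, and the fragmentation limit that the real retrieve() checks, and its names are illustrative.

#include <cstdint>
#include <list>

struct Block {
  uintptr_t CommitBase = 0;
  uintptr_t CommitSize = 0;
};

// Return the cached block whose committed size fits `Size` best, stopping
// early once a candidate is within `OptimalFitThresholdBytes` of the request.
Block *findBestFit(std::list<Block> &LruList, uintptr_t Size,
                   uintptr_t OptimalFitThresholdBytes) {
  Block *Retrieved = nullptr;      // plays the role of RetrievedEntry
  uintptr_t MinDiff = UINTPTR_MAX; // smallest size difference seen so far
  for (Block &Entry : LruList) {
    if (Entry.CommitSize < Size)
      continue; // block is too small for this request
    const uintptr_t Diff = Entry.CommitSize - Size;
    if (Diff >= MinDiff)
      continue; // not better than the best fit seen so far
    MinDiff = Diff;
    Retrieved = &Entry;
    if (Diff <= OptimalFitThresholdBytes)
      break; // close enough: stop scanning the rest of the list
  }
  return Retrieved;
}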
@@ -499,10 +499,8 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    for (CachedBlock &Entry : LRUEntries)
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
     QuarantinePos = -1U;
   }
 
@@ -513,79 +511,30 @@ class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
-  bool needToEvict() REQUIRES(Mutex) {
-    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
-  }
-
   void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
-    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
-
-    // Cache should be populated with valid entries when not empty
-    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
-
-    u32 FreeIndex = AvailableHead;
-    AvailableHead = Entries[AvailableHead].Next;
-
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
+    CachedBlock *AvailEntry = AvailEntries.front();
+    AvailEntries.pop_front();
 
-    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
-    EntriesCount++;
-
-    // Availability stack should not have available entries when all entries
-    // are in use
-    if (EntriesCount == Config::getEntriesArraySize())
-      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+    *AvailEntry = Entry;
+    LRUEntries.push_front(AvailEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
-    else
-      Entries[Entries[I].Prev].Next = Entries[I].Next;
-
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
-    else
-      Entries[Entries[I].Next].Prev = Entries[I].Prev;
-
-    Entries[I].Next = AvailableHead;
-    AvailableHead = static_cast<u16>(I);
-    EntriesCount--;
-
-    // Cache should not have valid entries when not empty
-    if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
-    }
+  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
+    DCHECK(Entry->isValid());
+    LRUEntries.remove(Entry);
+    Entry->invalidate();
+    AvailEntries.push_front(Entry);
  }
 
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
-      EntriesCount = 0;
+
+      for (CachedBlock &Entry : LRUEntries)
+        MapInfo[N++] = Entry.MemMap;
+      LRUEntries.clear();
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
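insert() and remove() now simply move a CachedBlock between the two lists, with the index surgery and most of the DCHECK bookkeeping gone. The sketch below shows the invariant they maintain (every slot is on exactly one list); std::list of pointers stands in for scudo's intrusive lists, and `Valid` stands in for isValid()/invalidate(), so this is a hedged illustration rather than the actual implementation.

#include <cassert>
#include <list>

struct Block {
  bool Valid = false;
  int Payload = 0;
};

struct Cache {
  std::list<Block *> LruList;  // most recently used at the front
  std::list<Block *> FreeList; // unused slots

  void insert(const Block &Entry) {
    // Capacity is enforced by the eviction loop in store(), so a free slot
    // must exist here.
    assert(!FreeList.empty());
    Block *Slot = FreeList.front();
    FreeList.pop_front();
    *Slot = Entry;            // copy the cached block into the free slot
    Slot->Valid = true;
    LruList.push_front(Slot); // newest entry becomes the most recently used
  }

  void remove(Block *Entry) {
    assert(Entry->Valid);
    LruList.remove(Entry);    // unlink from the LRU list (pointer equality)
    Entry->Valid = false;     // invalidate()
    FreeList.push_front(Entry);
  }
};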
@@ -607,7 +556,7 @@ class MapAllocatorCache {
 
   void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
@@ -617,7 +566,6 @@ class MapAllocatorCache {
   }
 
   HybridMutex Mutex;
-  u32 EntriesCount GUARDED_BY(Mutex) = 0;
   u32 QuarantinePos GUARDED_BY(Mutex) = 0;
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
@@ -630,12 +578,10 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
-  // The AvailableHead is the top of the stack of available entries
-  u16 AvailableHead GUARDED_BY(Mutex) = 0;
+  // Cached blocks stored in LRU order
+  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
+  // The unused Entries
+  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
 };
 
 template <typename Config> class MapAllocator {
