
Commit b683317

[scudo] Added LRU eviction policy to secondary cache.
The logic for emptying the cache now follows an LRU eviction policy. When the cache is full on any given free operation, the oldest entry in the cache is evicted and the memory associated with that cache entry is unmapped. Finding empty cache entries is now a constant-time operation through a stack of available cache entries. With the LRU structure, the cache retrieval algorithm now iterates only over valid entries of the cache. Furthermore, the retrieval algorithm will first search cache entries that have not been decommitted (i.e., madvise() has not been called on their corresponding memory chunks) to reduce the likelihood of returning a memory chunk to the user that would induce a page fault.
1 parent 9ce5b38 commit b683317
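The layout described above can be illustrated with a small standalone sketch (this is not the scudo code; the names Entry, LruCache, Head, Tail, AvailHead, and Count are hypothetical, and standard integer types stand in for scudo's u16/u64). A single fixed-size array carries both a doubly linked LRU list of valid entries, threaded through per-entry Next/Prev indices, and a singly linked stack of free slots, so insertion, removal, and eviction are all O(1):

#include <cassert>
#include <cstdint>

// Illustrative sketch only: one fixed array whose Next/Prev index fields
// thread both an LRU list of valid entries and a stack of free slots.
struct Entry {
  static constexpr uint16_t Invalid = UINT16_MAX;
  uint64_t Time = 0;       // 0 would mean the backing pages were released
  bool Valid = false;
  uint16_t Next = Invalid; // next in the LRU list, or next free slot
  uint16_t Prev = Invalid; // previous in the LRU list
};

template <uint16_t N> class LruCache {
public:
  LruCache() {
    // Every slot starts on the availability stack.
    for (uint16_t I = 0; I + 1 < N; I++)
      Entries[I].Next = static_cast<uint16_t>(I + 1);
    Entries[N - 1].Next = Entry::Invalid;
  }

  bool full() const { return Count == N; }

  // O(1): pop a free slot off the stack and link it at the LRU head.
  uint16_t insert(uint64_t Time) {
    assert(!full() && "evict the tail before inserting");
    uint16_t I = AvailHead;
    AvailHead = Entries[I].Next;
    Entries[I].Valid = true;
    Entries[I].Time = Time;
    Entries[I].Prev = Entry::Invalid;
    Entries[I].Next = Head;
    if (Head != Entry::Invalid)
      Entries[Head].Prev = I;
    else
      Tail = I; // first entry is both head and tail
    Head = I;
    Count++;
    return I;
  }

  // O(1): unlink entry I from the LRU list and push its slot on the stack.
  void remove(uint16_t I) {
    assert(Entries[I].Valid);
    Entries[I].Valid = false;
    if (I == Head)
      Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == Tail)
      Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
    Entries[I].Next = AvailHead;
    AvailHead = I;
    Count--;
  }

  // LRU eviction: drop the least recently used entry (the tail).
  void evictOldest() { remove(Tail); }

private:
  Entry Entries[N];
  uint16_t Head = Entry::Invalid; // most recently used
  uint16_t Tail = Entry::Invalid; // least recently used
  uint16_t AvailHead = 0;         // top of the free-slot stack
  uint16_t Count = 0;
};

When the sketch's cache is full, the caller would evict the tail (the least recently used entry) before inserting, mirroring the eviction loop this commit adds to store().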

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 119 additions & 40 deletions
@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"

 namespace scudo {

@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }

 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;

   bool isValid() { return CommitBase != 0; }

@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Dump (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                   Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "");

   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,18 +225,30 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }

   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);

-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     const u64 Time = getMonotonicTimeFast();
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     CachedBlock Entry;
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
@@ -254,6 +278,7 @@ template <typename Config> class MapAllocatorCache {
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +294,27 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (EntriesCount >= MaxCount) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }

   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
@@ -312,9 +334,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +368,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -410,19 +430,17 @@ template <typename Config> class MapAllocatorCache {

   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+    for (u32 I = 0; I != Config::getQuarantineSize(); I++) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
         Quarantine[I].invalidate();
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +452,62 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }

 private:
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK(AvailableHead == CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should not have valid entries when not empty
+    if (EntriesCount == 0) {
+      DCHECK(LRUHead == CachedBlock::InvalidEntry);
+      DCHECK(LRUTail == CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
@@ -447,7 +521,6 @@ template <typename Config> class MapAllocatorCache {
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +557,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  // The LRUTail of the cache is the least recently used cache entry
+  // The AvailableHead is the top of the stack of available entries
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };

 template <typename Config> class MapAllocator {
