
Commit 56cd664

[scudo] Added LRU eviction policy to secondary cache.
The logic for emptying the cache now follows an LRU eviction policy: when the cache is full on a free operation, the oldest entry is evicted and the memory associated with it is unmapped. Finding an empty cache entry is now a constant-time operation, thanks to a stack of available cache entries. With the LRU structure, the retrieval algorithm iterates over only the valid entries of the cache. Furthermore, retrieval first searches cache entries that have not been decommitted (i.e., madvise() has not been called on their underlying memory) to reduce the likelihood of returning a memory chunk to the user that would induce a page fault.
1 parent 9ce5b38 commit 56cd664
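To make the bookkeeping described in the commit message concrete, here is a minimal standalone sketch, not scudo's code: entries of a fixed array are threaded into a doubly linked LRU list through u16 Next/Prev indices, and unused slots form a singly linked free stack, so insertion, removal, and eviction of the tail are all O(1). All names here (LruCache, Slot, ArraySize, AvailHead) are hypothetical.

#include <cassert>
#include <cstdint>

template <uint16_t ArraySize> class LruCache {
  static constexpr uint16_t InvalidEntry = UINT16_MAX;
  static_assert(ArraySize > 0 && ArraySize < InvalidEntry,
                "array must be non-empty and indexable by u16");

  struct Slot {
    uint64_t Value = 0;           // Payload; scudo stores CommitBase, MemMap, ...
    uint16_t Next = InvalidEntry; // LRU list link, or next free slot.
    uint16_t Prev = InvalidEntry;
  };

  Slot Slots[ArraySize];
  uint16_t LruHead = InvalidEntry; // Most recently inserted entry.
  uint16_t LruTail = InvalidEntry; // Least recently inserted entry.
  uint16_t AvailHead = 0;          // Top of the stack of free slots.
  uint16_t Count = 0;

public:
  LruCache() {
    // Thread every slot onto the availability stack, as init() does below.
    for (uint16_t I = 0; I + 1 < ArraySize; I++)
      Slots[I].Next = static_cast<uint16_t>(I + 1);
    Slots[ArraySize - 1].Next = InvalidEntry;
  }

  bool full() const { return Count == ArraySize; }
  uint16_t oldest() const { return LruTail; } // Eviction candidate.

  // O(1): pop a free slot off the stack and link it in as the new LRU head.
  uint16_t insert(uint64_t Value) {
    assert(!full());
    const uint16_t Free = AvailHead;
    AvailHead = Slots[Free].Next;
    if (Count == 0)
      LruTail = Free;
    else
      Slots[LruHead].Prev = Free;
    Slots[Free].Value = Value;
    Slots[Free].Next = LruHead;
    Slots[Free].Prev = InvalidEntry;
    LruHead = Free;
    Count++;
    return Free;
  }

  // O(1): unlink slot I from the LRU list and push it back on the free stack.
  void remove(uint16_t I) {
    assert(Count > 0);
    if (I == LruHead)
      LruHead = Slots[I].Next;
    else
      Slots[Slots[I].Prev].Next = Slots[I].Next;
    if (I == LruTail)
      LruTail = Slots[I].Prev;
    else
      Slots[Slots[I].Next].Prev = Slots[I].Prev;
    Slots[I].Next = AvailHead;
    AvailHead = I;
    Count--;
  }
};

scudo's version in the diff below keeps the same shape, but stores the links directly in CachedBlock, guards the lists with the cache Mutex, and evicts from LRUTail whenever the entry count reaches MaxEntriesCount.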

1 file changed: +138 -45 lines changed

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 138 additions & 45 deletions
@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"
 
 namespace scudo {
 
@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }
 
 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;
 
   bool isValid() { return CommitBase != 0; }
 
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Dump (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "Cache entry array is too large to be indexed.");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,39 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);
 
-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
-    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
     CachedBlock Entry;
+
+    // Usually only one entry will be evicted from the cache.
+    // Only in the rare event that the cache shrinks in real-time
+    // due to a decrease in the configurable value MaxEntriesCount
+    // will more than one cache entry be evicted
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
     Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -243,17 +271,24 @@ template <typename Config> class MapAllocatorCache {
         Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                          MAP_NOACCESS);
       }
-    } else if (Interval == 0) {
-      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
-      Entry.Time = 0;
     }
+
     do {
       ScopedLock L(Mutex);
+
+      // Time must be computed under the lock to ensure
+      // that the LRU cache remains sorted with respect to
+      // time in a multithreaded environment
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
       if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
         // If we get here then memory tagging was disabled in between when we
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +304,27 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (needToEvict()) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }
 
   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
@@ -312,9 +344,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +378,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -418,11 +448,9 @@ template <typename Config> class MapAllocatorCache {
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +462,66 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
+  bool needToEvict() REQUIRES(Mutex) {
+    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+  }
+
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should not have valid entries when not empty
+    if (EntriesCount == 0) {
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
@@ -447,7 +535,6 @@ template <typename Config> class MapAllocatorCache {
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +571,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
 
   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // The AvailableHead is the top of the stack of available entries
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {
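The store() path in the diff also illustrates a locking pattern worth noting: the MemMaps of evicted entries are collected into a small local vector while the mutex is held, and the unmap() calls are issued only after the lock is released. Below is a minimal sketch of that pattern only, using standard library containers instead of scudo's types; Cache, Mapping, and MaxEntries are hypothetical names.

#include <cstddef>
#include <list>
#include <mutex>
#include <vector>

// Stand-in for scudo's MemMapT; unmap() is just a placeholder here.
struct Mapping {
  void unmap() {}
};

class Cache {
public:
  explicit Cache(size_t MaxEntries) : MaxEntries(MaxEntries) {}

  void store(const Mapping &NewEntry) {
    std::vector<Mapping> Evicted; // Usually holds at most one element.
    {
      std::lock_guard<std::mutex> L(Mutex);
      // Evict least-recently-used entries while the cache is full.
      while (Lru.size() >= MaxEntries) {
        Evicted.push_back(Lru.back());
        Lru.pop_back();
      }
      Lru.push_front(NewEntry); // Newest entry becomes the LRU head.
    }
    // Unmapping may enter the kernel; keep it outside the critical section.
    for (Mapping &M : Evicted)
      M.unmap();
  }

private:
  std::mutex Mutex;
  std::list<Mapping> Lru; // Front = most recent, back = least recent.
  const size_t MaxEntries;
};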

0 commit comments