Commit b7e93c9

[scudo] Added LRU eviction policy to secondary cache.
The logic for emptying the cache now follows an LRU eviction policy. When the cache is full on any given free operation, the oldest entry in the cache is evicted and the memory associated with that entry is unmapped. Finding empty cache entries is now a constant-time operation thanks to a stack of available cache entries. Through the LRU structure, the cache retrieval algorithm now iterates only over valid entries of the cache. Furthermore, the retrieval algorithm first searches cache entries that have not been decommitted (i.e. madvise() has not been called on their corresponding memory chunks) to reduce the likelihood of returning a memory chunk to the user that would induce a page fault.
1 parent 9ce5b38 commit b7e93c9
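For readers unfamiliar with the layout this commit describes, the sketch below is a minimal, standalone illustration and not scudo code: cache slots live in a fixed array, Next/Prev indices thread them into a doubly-linked LRU list, and unused slots form a singly-linked free stack so an empty slot is found in constant time. All names in it (Cache, Entry, kCapacity, store, removeAt) are hypothetical.

// Minimal illustrative sketch (not scudo code). Entries sit in a fixed array;
// Next/Prev indices thread them into a doubly-linked LRU list, and unused
// slots form a singly-linked free stack so an empty slot is found in O(1).
#include <cassert>
#include <cstdint>
#include <cstdio>

struct Entry {
  uintptr_t Base = 0; // 0 means the slot is unused
  uint16_t Next = 0;  // next entry in the LRU list, or next free slot
  uint16_t Prev = 0;  // previous entry in the LRU list
};

class Cache {
  static constexpr uint16_t kCapacity = 4;
  static constexpr uint16_t kInvalid = UINT16_MAX;

  Entry Entries[kCapacity];
  uint16_t Head = kInvalid; // most recently used
  uint16_t Tail = kInvalid; // least recently used; evicted first
  uint16_t FreeHead = 0;    // top of the stack of available slots
  uint16_t Count = 0;

public:
  Cache() {
    // Chain every slot into the free stack.
    for (uint16_t I = 0; I + 1 < kCapacity; I++)
      Entries[I].Next = static_cast<uint16_t>(I + 1);
    Entries[kCapacity - 1].Next = kInvalid;
  }

  // Insert at the head (most recently used); evict the tail if full.
  void store(uintptr_t Base) {
    if (Count == kCapacity)
      removeAt(Tail); // LRU eviction: drop the oldest entry
    const uint16_t I = FreeHead; // pop a free slot in O(1)
    FreeHead = Entries[I].Next;
    Entries[I] = {Base, Head, kInvalid};
    if (Head != kInvalid)
      Entries[Head].Prev = I;
    else
      Tail = I; // first entry is both head and tail
    Head = I;
    Count++;
  }

private:
  void removeAt(uint16_t I) {
    assert(Entries[I].Base != 0);
    Entries[I].Base = 0;
    if (I == Head)
      Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == Tail)
      Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
    // Push the freed slot back onto the free stack.
    Entries[I].Next = FreeHead;
    FreeHead = I;
    Count--;
  }
};

int main() {
  Cache C;
  for (uintptr_t B = 0x1000; B <= 0x6000; B += 0x1000)
    C.store(B); // once full, the oldest Base is evicted first
  std::puts("done");
}

In this sketch, eviction amounts to removeAt(Tail) followed by reuse of the freed index, which mirrors the shape of the remove(LRUTail)/insert(Entry) pair added in the diff below.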

File tree: 1 file changed

compiler-rt/lib/scudo/standalone/secondary.h
Lines changed: 126 additions & 42 deletions

@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"

 namespace scudo {

@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }

 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;

   bool isValid() { return CommitBase != 0; }

@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Dump (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                   Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "");

   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,35 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }

   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);

-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
     CachedBlock Entry;
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
     Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -249,11 +273,17 @@ template <typename Config> class MapAllocatorCache {
     }
     do {
       ScopedLock L(Mutex);
+
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
       if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
         // If we get here then memory tagging was disabled in between when we
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +299,27 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (EntriesCount >= MaxCount) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }

   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
@@ -312,9 +339,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +373,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -410,19 +435,17 @@ template <typename Config> class MapAllocatorCache {

   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+    for (u32 I = 0; I != Config::getQuarantineSize(); I++) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
         Quarantine[I].invalidate();
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +457,62 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }

 private:
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK(AvailableHead == CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should not have valid entries when not empty
+    if (EntriesCount == 0) {
+      DCHECK(LRUHead == CachedBlock::InvalidEntry);
+      DCHECK(LRUTail == CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
@@ -447,7 +526,6 @@ template <typename Config> class MapAllocatorCache {
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +562,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  // The LRUTail of the cache is the least recently used cache entry
+  // The AvailableHead is the top of the stack of available entries
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };

 template <typename Config> class MapAllocator {