
Commit 396d9a8

[scudo] Separated committed and decommitted entries.
Previously, a single LRU list stored all mapped entries, with no distinction between committed (non-madvise()'d) entries and decommitted (madvise()'d) entries. These two kinds of entries are now kept on separate lists, allowing future cache logic to branch depending on whether an entry is committed or decommitted. Furthermore, the retrieval algorithm now prioritizes committed entries over decommitted ones: committed entries that satisfy the MaxUnusedCachePages requirement are retrieved before optimal-fit decommitted entries.
1 parent 1a6d60e commit 396d9a8
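The structural change is small: cache entries still live in a fixed-size array, but the u16 links that previously formed a single LRU list (LRUHead/LRUTail) are now threaded into one of two lists selected by an EntryListT tag. The sketch below is a minimal, standalone illustration of that idea, not the scudo implementation: Cache, Entry, demote, and main are invented names for this example, and locking, timestamps, block metadata, and the stack of available slots are all omitted.

#include <cstdint>
#include <cstdio>

using u16 = std::uint16_t;
constexpr u16 InvalidEntry = 0xFFFF;

enum EntryListT { COMMITTED = 0, DECOMMITTED = 1 };

// Links are array indices rather than pointers, which keeps each link to
// two bytes; this mirrors the "non-pointer link type" the commit's TODO
// comment refers to.
struct Entry {
  u16 Prev = InvalidEntry;
  u16 Next = InvalidEntry;
};

struct ListInfo {
  u16 Head = InvalidEntry;
  u16 Tail = InvalidEntry;
};

struct Cache {
  static constexpr u16 Size = 8;
  Entry Entries[Size];
  ListInfo Lists[2]; // one list per EntryListT value

  // Link Entries[I] at the front (most-recently-used end) of a list.
  void pushFront(u16 I, EntryListT L) {
    if (Lists[L].Tail == InvalidEntry)
      Lists[L].Tail = I; // list was empty, so I is also the tail
    else
      Entries[Lists[L].Head].Prev = I;
    Entries[I].Next = Lists[L].Head;
    Entries[I].Prev = InvalidEntry;
    Lists[L].Head = I;
  }

  // Splice Entries[I] out of a list by joining its two neighbours.
  void unlink(u16 I, EntryListT L) {
    if (I == Lists[L].Head)
      Lists[L].Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == Lists[L].Tail)
      Lists[L].Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
  }

  // Moving an entry between lists is unlink followed by pushFront; this is
  // the operation the new releaseOlderThan loop uses to demote released
  // entries from COMMITTED to DECOMMITTED.
  void demote(u16 I) {
    unlink(I, COMMITTED);
    pushFront(I, DECOMMITTED);
  }
};

int main() {
  Cache C;
  C.pushFront(0, COMMITTED);
  C.pushFront(1, COMMITTED); // COMMITTED list is now 1 -> 0
  C.demote(0);               // entry 0 moves to the DECOMMITTED list
  std::printf("committed head: %d, decommitted head: %d\n",
              static_cast<int>(C.Lists[COMMITTED].Head),
              static_cast<int>(C.Lists[DECOMMITTED].Head)); // prints 1, 0
  return 0;
}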

File tree
1 file changed: +140 −75 lines changed


compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 140 additions & 75 deletions
@@ -184,6 +184,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
 class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -201,13 +209,18 @@ class MapAllocatorCache {
           SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }

   // Ensure the default maximum specified fits the array.
@@ -231,8 +244,10 @@ class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;

     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -250,7 +265,6 @@ class MapAllocatorCache {
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
-
     Entry.CommitBase = CommitBase;
     Entry.CommitSize = CommitSize;
     Entry.BlockBegin = BlockBegin;
@@ -314,16 +328,22 @@ class MapAllocatorCache {

     // All excess entries are evicted from the cache
     while (needToEvict()) {
+      EntryListT EvictionListType;
+      if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+        EvictionListType = COMMITTED;
+      else
+        EvictionListType = DECOMMITTED;
       // Save MemMaps of evicted entries to perform unmap outside of lock
-      EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-      remove(LRUTail);
+      EvictionMemMaps.push_back(
+          Entries[EntryLists[EvictionListType].Tail].MemMap);
+      remove(EntryLists[EvictionListType].Tail, EvictionListType);
     }

-    insert(Entry);
+    insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);

     if (OldestTime == 0)
       OldestTime = Entry.Time;
-  } while (0);
+  } while (0); // ScopedLock L(Mutex);

   for (MemMapT &EvictMemMap : EvictionMemMaps)
     unmapCallBack(EvictMemMap);
@@ -340,17 +360,14 @@ class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
+    uptr MinDiff = UINTPTR_MAX;
+    EntryListT OptimalFitListType = NONE;
     EntryHeaderPos = 0;
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return {};
-      u32 OptimalFitIndex = 0;
-      uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+
+    auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
            I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
@@ -360,34 +377,48 @@ class MapAllocatorCache {
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
           continue;
-        }
-        Found = true;
+
         const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
+        // immediately use a cached block if its size is close enough to
+        // the requested size.
         const uptr MaxAllowedFragmentedBytes =
             (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
         if (Diff <= MaxAllowedFragmentedBytes) {
           OptimalFitIndex = I;
           EntryHeaderPos = HeaderPos;
-          break;
+          OptimalFitListType = ListType;
+          return true;
         }
+
         // keep track of the smallest cached block
         // that is greater than (AllocSize + HeaderSize)
         if (Diff > MinDiff)
           continue;
         OptimalFitIndex = I;
         MinDiff = Diff;
+        OptimalFitListType = ListType;
         EntryHeaderPos = HeaderPos;
       }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
+      return (OptimalFitIndex != CachedBlock::InvalidEntry);
+    };
+
+    {
+      ScopedLock L(Mutex);
+      CallsToRetrieve++;
+      if (EntriesCount == 0)
+        return {};
+
+      // Prioritize valid fit from COMMITTED entries over
+      // optimal fit from DECOMMITTED entries
+      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+        return {};
+
+      Entry = Entries[OptimalFitIndex];
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);

     return Entry;
   }
@@ -432,10 +463,15 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }

@@ -450,7 +486,7 @@ class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }

-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));

     // Cache should be populated with valid entries when not empty
@@ -459,66 +495,86 @@ class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;

-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;

+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }

-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;

-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
+
+  // Invalidates Entries[I], removes Entries[I] from list, and pushes
+  // Entries[I] onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();

+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;

     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }

+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -545,8 +601,14 @@ class MapAllocatorCache {
     OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
+         I = Entries[I].Next) {
+      if (Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }

   HybridMutex Mutex;
@@ -563,10 +625,12 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};

-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries have memory chunks that have not been released to the OS
+  // DECOMMITTED entries have memory chunks that have been released to the OS
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
@@ -706,6 +770,7 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
   }
   return Ptr;
 }
+
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
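Viewed at a distance, the retrieval policy this diff gives retrieve() is a two-pass search. The sketch below models it under assumptions: Block, bestFit, retrieve, and MaxWaste (standing in for PageSize * MaxUnusedCachePages) are invented names for this example, and the real code additionally tracks header positions within each block, holds Mutex, and returns early from the committed scan when a block's fragmentation is within the FragmentedBytesDivisor bound.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
  std::size_t Size; // usable size of the cached block
  bool Committed;   // false once the block's pages were madvise()'d
};

// Best-fit scan over blocks of one kind; returns -1 if none qualifies.
static int bestFit(const std::vector<Block> &Blocks, bool WantCommitted,
                   std::size_t Need, std::size_t MaxWaste) {
  int Best = -1;
  std::size_t BestDiff = SIZE_MAX;
  for (int I = 0; I < static_cast<int>(Blocks.size()); ++I) {
    const Block &B = Blocks[I];
    if (B.Committed != WantCommitted || B.Size < Need)
      continue; // wrong list or too small
    const std::size_t Diff = B.Size - Need;
    if (Diff > MaxWaste)
      continue; // would leave too many unused cached pages
    if (Diff < BestDiff) {
      BestDiff = Diff;
      Best = I;
    }
  }
  return Best;
}

// Pass 1 scans committed blocks; pass 2 runs only if pass 1 found nothing.
int retrieve(const std::vector<Block> &Blocks, std::size_t Need,
             std::size_t MaxWaste) {
  int I = bestFit(Blocks, /*WantCommitted=*/true, Need, MaxWaste);
  if (I < 0)
    I = bestFit(Blocks, /*WantCommitted=*/false, Need, MaxWaste);
  return I;
}

The decommitted list is consulted only when no committed block can serve the request at all, which presumably keeps already-committed memory in use before touching blocks whose pages have been returned to the OS.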
