Skip to content

Commit a7f0195

Browse files
FernandoChiaHungDuan
authored and committed
[scudo] secondary allocator cache optimal-fit retrieval
Changed the cache retrieve algorithm to an "optimal-fit" which immediately returns blocks that are less than 110% of the requested size. This reduces memory waste while still allowing for an early return without traversing the entire array of cached blocks. Reviewed By: cferris, Chia-hungDuan Differential Revision: https://reviews.llvm.org/D157155
1 parent c3f227e commit a7f0195

File tree

1 file changed

+29
-6
lines changed

1 file changed

+29
-6
lines changed

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 29 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -279,41 +279,64 @@ template <typename Config> class MapAllocatorCache {
279279
LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
280280
const uptr PageSize = getPageSizeCached();
281281
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
282+
// 10% of the requested size proved to be the optimal choice for
283+
// retrieving cached blocks after testing several options.
284+
constexpr u32 FragmentedBytesDivisor = 10;
282285
bool Found = false;
283286
CachedBlock Entry;
284-
uptr HeaderPos = 0;
287+
uptr EntryHeaderPos = 0;
285288
{
286289
ScopedLock L(Mutex);
287290
CallsToRetrieve++;
288291
if (EntriesCount == 0)
289292
return false;
293+
u32 OptimalFitIndex = 0;
294+
uptr MinDiff = UINTPTR_MAX;
290295
for (u32 I = 0; I < MaxCount; I++) {
291296
if (!Entries[I].isValid())
292297
continue;
293298
const uptr CommitBase = Entries[I].CommitBase;
294299
const uptr CommitSize = Entries[I].CommitSize;
295300
const uptr AllocPos =
296301
roundDown(CommitBase + CommitSize - Size, Alignment);
297-
HeaderPos = AllocPos - HeadersSize;
302+
const uptr HeaderPos = AllocPos - HeadersSize;
298303
if (HeaderPos > CommitBase + CommitSize)
299304
continue;
300305
if (HeaderPos < CommitBase ||
301306
AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
302307
continue;
303308
}
304309
Found = true;
305-
Entry = Entries[I];
306-
Entries[I].invalidate();
310+
const uptr Diff = HeaderPos - CommitBase;
311+
// immediately use a cached block if its size is close enough to the
312+
// requested size.
313+
const uptr MaxAllowedFragmentedBytes =
314+
(CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
315+
if (Diff <= MaxAllowedFragmentedBytes) {
316+
OptimalFitIndex = I;
317+
EntryHeaderPos = HeaderPos;
318+
break;
319+
}
320+
// keep track of the smallest cached block
321+
// that is greater than (AllocSize + HeaderSize)
322+
if (Diff > MinDiff)
323+
continue;
324+
OptimalFitIndex = I;
325+
MinDiff = Diff;
326+
EntryHeaderPos = HeaderPos;
327+
}
328+
if (Found) {
329+
Entry = Entries[OptimalFitIndex];
330+
Entries[OptimalFitIndex].invalidate();
307331
EntriesCount--;
308332
SuccessfulRetrieves++;
309-
break;
310333
}
311334
}
312335
if (!Found)
313336
return false;
314337

315338
*H = reinterpret_cast<LargeBlock::Header *>(
316-
LargeBlock::addHeaderTag<Config>(HeaderPos));
339+
LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
317340
*Zeroed = Entry.Time == 0;
318341
if (useMemoryTagging<Config>(Options))
319342
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);

0 commit comments

Comments
 (0)