@@ -279,41 +279,64 @@ template <typename Config> class MapAllocatorCache {
279
279
LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
280
280
const uptr PageSize = getPageSizeCached ();
281
281
const u32 MaxCount = atomic_load_relaxed (&MaxEntriesCount);
282
+ // 10% of the requested size proved to be the optimal choice for
283
+ // retrieving cached blocks after testing several options.
284
+ constexpr u32 FragmentedBytesDivisor = 10 ;
282
285
bool Found = false ;
283
286
CachedBlock Entry;
284
- uptr HeaderPos = 0 ;
287
+ uptr EntryHeaderPos = 0 ;
285
288
{
286
289
ScopedLock L (Mutex);
287
290
CallsToRetrieve++;
288
291
if (EntriesCount == 0 )
289
292
return false ;
293
+ u32 OptimalFitIndex = 0 ;
294
+ uptr MinDiff = UINTPTR_MAX;
290
295
for (u32 I = 0 ; I < MaxCount; I++) {
291
296
if (!Entries[I].isValid ())
292
297
continue ;
293
298
const uptr CommitBase = Entries[I].CommitBase ;
294
299
const uptr CommitSize = Entries[I].CommitSize ;
295
300
const uptr AllocPos =
296
301
roundDown (CommitBase + CommitSize - Size, Alignment);
297
- HeaderPos = AllocPos - HeadersSize;
302
+ const uptr HeaderPos = AllocPos - HeadersSize;
298
303
if (HeaderPos > CommitBase + CommitSize)
299
304
continue ;
300
305
if (HeaderPos < CommitBase ||
301
306
AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
302
307
continue ;
303
308
}
304
309
Found = true ;
305
- Entry = Entries[I];
306
- Entries[I].invalidate ();
310
+ const uptr Diff = HeaderPos - CommitBase;
311
+ // Immediately use a cached block if its size is close enough to the
312
+ // requested size.
313
+ const uptr MaxAllowedFragmentedBytes =
314
+ (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
315
+ if (Diff <= MaxAllowedFragmentedBytes) {
316
+ OptimalFitIndex = I;
317
+ EntryHeaderPos = HeaderPos;
318
+ break ;
319
+ }
320
+ // Keep track of the smallest cached block
321
+ // that is greater than (AllocSize + HeaderSize)
322
+ if (Diff > MinDiff)
323
+ continue ;
324
+ OptimalFitIndex = I;
325
+ MinDiff = Diff;
326
+ EntryHeaderPos = HeaderPos;
327
+ }
328
+ if (Found) {
329
+ Entry = Entries[OptimalFitIndex];
330
+ Entries[OptimalFitIndex].invalidate ();
307
331
EntriesCount--;
308
332
SuccessfulRetrieves++;
309
- break ;
310
333
}
311
334
}
312
335
if (!Found)
313
336
return false ;
314
337
315
338
*H = reinterpret_cast <LargeBlock::Header *>(
316
- LargeBlock::addHeaderTag<Config>(HeaderPos ));
339
+ LargeBlock::addHeaderTag<Config>(EntryHeaderPos ));
317
340
*Zeroed = Entry.Time == 0 ;
318
341
if (useMemoryTagging<Config>(Options))
319
342
Entry.MemMap .setMemoryPermission (Entry.CommitBase , Entry.CommitSize , 0 );
0 commit comments