@@ -72,6 +72,14 @@ namespace {
struct CachedBlock {
  static constexpr u16 CacheIndexMax = UINT16_MAX;
  static constexpr u16 InvalidEntry = CacheIndexMax;
+ // * MaxReleasedCachePages default is currently 4
+ //   - We arrived at this value after noticing that mapping in larger
+ //     memory regions performs better than releasing memory and forcing
+ //     a cache hit. The data suggest that beyond 4 pages, the release
+ //     execution time exceeds the map execution time, so this default is
+ //     platform-dependent.
+ static constexpr uptr MaxReleasedCachePages = 4U;

  uptr CommitBase = 0;
  uptr CommitSize = 0;
@@ -90,8 +98,9 @@ struct CachedBlock {
template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
- CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
-                      UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
+ CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size,
+                      UNUSED uptr Alignment, UNUSED uptr HeadersSize,
+                      UNUSED uptr &EntryHeaderPos) {
    return {};
  }
  void store(UNUSED Options Options, UNUSED uptr CommitBase,
@@ -121,7 +130,7 @@ template <typename Config> class MapAllocatorNoCache {
  }
};

- static const uptr MaxUnusedCachePages = 4U;
+ static const uptr MaxUnreleasedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
@@ -151,9 +160,11 @@ bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
    }
  }

- const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
- if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
-   const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
+ const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
+ if (useMemoryTagging<Config>(Options) &&
+     CommitSize > MaxUnreleasedCacheBytes) {
+   const uptr UntaggedPos =
+       Max(AllocPos, CommitBase + MaxUnreleasedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
@@ -334,61 +345,112 @@ class MapAllocatorCache {
    }
  }

- CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
-                      uptr &EntryHeaderPos) EXCLUDES(Mutex) {
+ CachedBlock retrieve(uptr MaxAllowedFragmentedPages, uptr Size,
+                      uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
+     EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
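+   // (For example, with illustrative numbers only: with a divisor of 10, a
+   // cached block whose usable span is ~100 KiB is used immediately only if
+   // at most ~10 KiB of it would sit unused in front of the returned header.)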
    constexpr u32 FragmentedBytesDivisor = 10;
-   bool Found = false;
    CachedBlock Entry;
    EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return {};
-     u32 OptimalFitIndex = 0;
+     u16 RetrievedIndex = CachedBlock::InvalidEntry;
      uptr MinDiff = UINTPTR_MAX;
-     for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+
+     // Since allocation sizes don't always match cached memory chunk sizes,
+     // we allow some memory to be unused (called fragmented bytes). The
+     // amount of unused bytes is exactly EntryHeaderPos - CommitBase.
+     //
+     //     CommitBase                CommitBase + CommitSize
+     //          V                              V
+     //      +---+------------+-----------------+---+
+     //      |   |            |                 |   |
+     //      +---+------------+-----------------+---+
+     //      ^                ^                     ^
+     //    Guard         EntryHeaderPos       Guard-page-end
+     //  page-begin
+     //
+     // [EntryHeaderPos, CommitBase + CommitSize) contains the user data as
+     // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
+     // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
+     // not considered valid for retrieval.
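+     //
+     // (For example, with illustrative numbers only: with 4 KiB pages and
+     // MaxAllowedFragmentedPages == 8, an entry whose EntryHeaderPos lies
+     // 40 KiB past CommitBase is skipped, since 40 KiB > 8 * 4 KiB.)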
+     for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
           I = Entries[I].Next) {
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
+       const uptr MaxAllowedFragmentedBytes =
+           MaxAllowedFragmentedPages * PageSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
-           AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+           AllocPos > CommitBase + MaxAllowedFragmentedBytes) {
          continue;
        }
-       Found = true;
-       const uptr Diff = HeaderPos - CommitBase;
-       // immediately use a cached block if it's size is close enough to the
-       // requested size.
-       const uptr MaxAllowedFragmentedBytes =
-           (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-       if (Diff <= MaxAllowedFragmentedBytes) {
-         OptimalFitIndex = I;
-         EntryHeaderPos = HeaderPos;
-         break;
-       }
-       // keep track of the smallest cached block
+
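+       // Note: Diff counts only whole unused pages in front of HeaderPos,
+       // since a partial page cannot be released back to the OS.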
+       const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;
+
+       // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize)
-       if (Diff > MinDiff)
+       if (Diff >= MinDiff)
          continue;
-       OptimalFitIndex = I;
+
        MinDiff = Diff;
+       RetrievedIndex = I;
        EntryHeaderPos = HeaderPos;
+
+       // Immediately use a cached block if its size is close enough to the
+       // requested size
+       const uptr OptimalFitThresholdBytes =
+           (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+       if (Diff <= OptimalFitThresholdBytes)
+         break;
      }
-     if (Found) {
-       Entry = Entries[OptimalFitIndex];
-       remove(OptimalFitIndex);
+     if (RetrievedIndex != CachedBlock::InvalidEntry) {
+       Entry = Entries[RetrievedIndex];
+       remove(RetrievedIndex);
        SuccessfulRetrieves++;
      }
    }

+   // The difference between the retrieved memory chunk and the requested
+   // size is at most MaxAllowedFragmentedPages pages.
+   //
+   //   / MaxAllowedFragmentedPages * PageSize \
+   //   +--------------------------+-------------+
+   //   |                          |             |
+   //   +--------------------------+-------------+
+   //   \   Bytes to be released   /      ^
+   //                                     |
+   //                       (may or may not be committed)
+   //
+   // The maximum number of bytes released to the OS is capped by
+   // MaxReleasedCachePages.
+   //
+   // TODO: Consider making MaxReleasedCachePages configurable since
+   // the release-to-OS API can vary across systems.
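+   //
+   // (For example, with illustrative numbers only: with 4 KiB pages,
+   // MaxUnreleasedCachePages == 4, and MaxReleasedCachePages == 4, an entry
+   // carrying 9 fragmented pages releases min(4, 9 - 4) == 4 pages back to
+   // the OS and keeps the remaining 5 fragmented pages mapped.)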
+   if (Entry.Time != 0) {
+     const uptr FragmentedBytes =
+         roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
+     const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
+     if (FragmentedBytes > MaxUnreleasedCacheBytes) {
+       const uptr MaxReleasedCacheBytes =
+           CachedBlock::MaxReleasedCachePages * PageSize;
+       uptr BytesToRelease =
+           roundUp(Min<uptr>(MaxReleasedCacheBytes,
+                             FragmentedBytes - MaxUnreleasedCacheBytes),
+                   PageSize);
+       Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
+     }
+   }
+
    return Entry;
  }

@@ -659,8 +721,18 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
                                     FillContentsMode FillContents) {
  CachedBlock Entry;
  uptr EntryHeaderPos;
+ uptr MaxAllowedFragmentedPages;
+
+ if (LIKELY(!useMemoryTagging<Config>(Options))) {
+   MaxAllowedFragmentedPages =
+       MaxUnreleasedCachePages + CachedBlock::MaxReleasedCachePages;
+ } else {
+   MaxAllowedFragmentedPages = MaxUnreleasedCachePages;
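+   // Restricting the budget to MaxUnreleasedCachePages means the release
+   // path in retrieve() never fires for memory-tagged blocks; fragmented
+   // bytes on this path always stay within the unreleased budget.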
+ }

- Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
+ Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment,
+                        getHeadersSize(), EntryHeaderPos);
  if (!Entry.isValid())
    return nullptr;