@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"
 
 namespace scudo {
 
@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }
 
 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;
 
   bool isValid() { return CommitBase != 0; }
 
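Editorial sketch (not part of the patch): the new Next and Prev fields chain entries of the fixed Entries array by 16-bit index rather than by pointer, with CacheIndexMax (UINT16_MAX) reserved as the InvalidEntry sentinel; that is also why the array size is asserted below to stay within CacheIndexMax. A minimal, self-contained illustration of the same linking scheme, with invented names (Node, sumChain), might look like:

// Illustrative only: fixed-array nodes chained by u16 index; UINT16_MAX is the
// "null" link, so the backing array must hold fewer than UINT16_MAX elements.
#include <cstdint>

constexpr uint16_t InvalidEntry = UINT16_MAX;

struct Node {
  uint64_t Payload = 0;
  uint16_t Next = InvalidEntry; // index of the next node, or InvalidEntry
  uint16_t Prev = InvalidEntry; // index of the previous node, or InvalidEntry
};

// Walk a chain starting at Head through the backing array Nodes.
inline uint64_t sumChain(const Node *Nodes, uint16_t Head) {
  uint64_t Sum = 0;
  for (uint16_t I = Head; I != InvalidEntry; I = Nodes[I].Next)
    Sum += Nodes[I].Payload;
  return Sum;
}

Indices are smaller than pointers and remain valid if the whole array is copied, which suits the statically sized Entries array used by the cache.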
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Dump (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                   Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,23 +225,35 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);
 
-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-    const u64 Time = getMonotonicTimeFast();
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+    u64 Time;
     CachedBlock Entry;
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
     Entry.MemMap = H->MemMap;
-    Entry.Time = Time;
+    Entry.Time = UINT64_MAX;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -249,11 +273,17 @@ template <typename Config> class MapAllocatorCache {
     }
     do {
       ScopedLock L(Mutex);
+
+      Time = getMonotonicTimeFast();
+      if (Entry.Time != 0)
+        Entry.Time = Time;
+
       if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
         // If we get here then memory tagging was disabled in between when we
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +299,27 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (EntriesCount >= MaxCount) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }
 
   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
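Editorial sketch (not part of the patch): the reworked store() gathers the MemMaps of evicted entries into EvictionMemMaps while Mutex is held and performs the actual unmap calls only after the critical section, keeping the slow system calls off the lock. The same pattern in generic C++, with placeholder names (Mapping, storeOrEvict) rather than Scudo APIs:

// Illustrative only: defer expensive resource release until after the lock is
// dropped, as store() does with EvictionMemMaps.
#include <cstddef>
#include <mutex>
#include <vector>

struct Mapping {
  void release() { /* stands in for a slow munmap-style call */ }
};

std::mutex CacheMutex;
std::vector<Mapping> Cache;
constexpr size_t MaxCount = 4; // placeholder for MaxEntriesCount

void storeOrEvict(Mapping M) {
  std::vector<Mapping> Evicted; // collected under the lock, released outside it
  {
    std::lock_guard<std::mutex> L(CacheMutex);
    while (Cache.size() >= MaxCount) {
      Evicted.push_back(Cache.back()); // evict the oldest entry
      Cache.pop_back();
    }
    Cache.push_back(M);
  }
  for (Mapping &E : Evicted)
    E.release(); // slow work happens without blocking other threads
}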
@@ -312,9 +339,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +373,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -410,19 +435,17 @@ template <typename Config> class MapAllocatorCache {
 
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+    for (u32 I = 0; I != Config::getQuarantineSize(); I++) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
         Quarantine[I].invalidate();
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +457,62 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK(AvailableHead == CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should not have valid entries when empty
+    if (EntriesCount == 0) {
+      DCHECK(LRUHead == CachedBlock::InvalidEntry);
+      DCHECK(LRUTail == CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
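Editorial sketch (not part of the patch): insert() and remove() above maintain the doubly linked LRU list and the availability stack purely with indices into the fixed array. Stripped of Scudo's types and DCHECKs, the index manipulation alone (invented names IndexLRU, Slots, Avail) looks like this:

// Illustrative only: the bare list surgery behind insert()/remove(). Head is
// the most recently used slot, Tail the least recently used, Avail the top of
// the stack of free slots.
#include <cstdint>

struct IndexLRU {
  static constexpr uint16_t Invalid = UINT16_MAX;
  struct Entry {
    uint16_t Next = Invalid;
    uint16_t Prev = Invalid;
  };

  Entry Slots[8] = {};
  uint16_t Head = Invalid, Tail = Invalid, Avail = Invalid;

  void initFree() { // chain every slot onto the free stack
    for (uint16_t I = 0; I + 1 < 8; I++)
      Slots[I].Next = static_cast<uint16_t>(I + 1);
    Slots[7].Next = Invalid;
    Avail = 0;
  }

  uint16_t insertFront() { // pop a free slot and splice it in as the new head
    uint16_t I = Avail;    // caller must ensure a free slot exists
    Avail = Slots[I].Next;
    Slots[I].Prev = Invalid;
    Slots[I].Next = Head;
    if (Head != Invalid)
      Slots[Head].Prev = I;
    else
      Tail = I; // first element is both head and tail
    Head = I;
    return I;
  }

  void remove(uint16_t I) { // unlink slot I and push it back on the free stack
    if (I == Head)
      Head = Slots[I].Next;
    else
      Slots[Slots[I].Prev].Next = Slots[I].Next;
    if (I == Tail)
      Tail = Slots[I].Prev;
    else
      Slots[Slots[I].Next].Prev = Slots[I].Prev;
    Slots[I].Next = Avail;
    Avail = I;
  }
};

Evicting the least recently used entry is then just remove(Tail), which is exactly what the eviction loop in store() does with remove(LRUTail).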
@@ -447,7 +526,6 @@ template <typename Config> class MapAllocatorCache {
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +562,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
 
   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  // The LRUTail of the cache is the least recently used cache entry
+  // The AvailableHead is the top of the stack of available entries
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {