@@ -19,6 +19,7 @@
 #include "stats.h"
 #include "string_utils.h"
 #include "thread_annotations.h"
+#include "vector.h"
 
 namespace scudo {
 
@@ -73,12 +74,18 @@ static inline void unmap(LargeBlock::Header *H) {
 }
 
 namespace {
+
 struct CachedBlock {
+  static constexpr u16 CacheIndexMax = UINT16_MAX;
+  static constexpr u16 InvalidEntry = CacheIndexMax;
+
   uptr CommitBase = 0;
   uptr CommitSize = 0;
   uptr BlockBegin = 0;
   MemMapT MemMap = {};
   u64 Time = 0;
+  u16 Next = 0;
+  u16 Prev = 0;
 
   bool isValid() { return CommitBase != 0; }
 
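
The two u16 links turn the flat Entries array into an intrusive doubly-linked
list addressed by array index rather than by pointer, with InvalidEntry
(UINT16_MAX) acting as the null link; this is also why the array size must
stay below CacheIndexMax, enforced by a static_assert further down. A minimal
standalone sketch of the idiom, with illustrative names rather than scudo's:

#include <cstdint>

namespace sketch {
// Sketch only: a list threaded through a fixed array via u16 indices.
// UINT16_MAX is reserved as the "null" link, so at most 65535 slots are
// addressable.
constexpr uint16_t InvalidEntry = UINT16_MAX;

struct Node {
  uint16_t Next = InvalidEntry;
  uint16_t Prev = InvalidEntry;
};

Node Nodes[64];
uint16_t Head = InvalidEntry;

// Link slot I at the front (the most-recently-used position) in O(1).
void pushFront(uint16_t I) {
  Nodes[I].Next = Head;
  Nodes[I].Prev = InvalidEntry;
  if (Head != InvalidEntry)
    Nodes[Head].Prev = I;
  Head = I;
}
} // namespace sketch
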
@@ -188,10 +195,11 @@ template <typename Config> class MapAllocatorCache {
     Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                 "(%zu.%02zu%%)\n",
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
-    for (CachedBlock Entry : Entries) {
-      if (!Entry.isValid())
-        continue;
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+    Str->append("Cache Entry Dump (Most Recent -> Least Recent):\n");
+
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                   "BlockSize: %zu %s\n",
                   Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                   Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
@@ -202,6 +210,10 @@ template <typename Config> class MapAllocatorCache {
   static_assert(Config::getDefaultMaxEntriesCount() <=
                     Config::getEntriesArraySize(),
                 "");
+  // Ensure the cache entry array size fits in the LRU list Next and Prev
+  // index fields
+  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
+                "");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
@@ -213,18 +225,30 @@ template <typename Config> class MapAllocatorCache {
     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+
+    // The cache is initially empty
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;
+
+    // Available entries will be retrieved starting from the beginning of the
+    // Entries array
+    AvailableHead = 0;
+    for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
+      Entries[I].Next = static_cast<u16>(I + 1);
+
+    Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
   void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
     if (!canCache(H->CommitSize))
       return unmap(H);
 
-    bool EntryCached = false;
-    bool EmptyCache = false;
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     const u64 Time = getMonotonicTimeFast();
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     CachedBlock Entry;
+    Vector<MemMapT, 1U> EvictionMemMaps;
+
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
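
After init() runs, the LRU list is empty and every slot sits on the
availability stack in index order. Assuming a hypothetical configuration with
getEntriesArraySize() == 4, the resulting state is:

// State after init(), assuming getEntriesArraySize() == 4 (hypothetical):
//
//   LRUHead = LRUTail = InvalidEntry   // LRU list is empty
//   AvailableHead = 0                  // free stack: 0 -> 1 -> 2 -> 3
//
//   Entries[0].Next = 1
//   Entries[1].Next = 2
//   Entries[2].Next = 3
//   Entries[3].Next = InvalidEntry     // bottom of the stack
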
@@ -254,6 +278,7 @@ template <typename Config> class MapAllocatorCache {
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
+        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -269,30 +294,27 @@ template <typename Config> class MapAllocatorCache {
           OldestTime = Entry.Time;
         Entry = PrevEntry;
       }
-      if (EntriesCount >= MaxCount) {
-        if (IsFullEvents++ == 4U)
-          EmptyCache = true;
-      } else {
-        for (u32 I = 0; I < MaxCount; I++) {
-          if (Entries[I].isValid())
-            continue;
-          if (I != 0)
-            Entries[I] = Entries[0];
-          Entries[0] = Entry;
-          EntriesCount++;
-          if (OldestTime == 0)
-            OldestTime = Entry.Time;
-          EntryCached = true;
-          break;
-        }
+
+      // All excess entries are evicted from the cache
+      while (EntriesCount >= MaxCount) {
+        // Save MemMaps of evicted entries to perform unmap outside of lock
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }
+
+      insert(Entry);
+
+      if (OldestTime == 0)
+        OldestTime = Entry.Time;
     } while (0);
-    if (EmptyCache)
-      empty();
-    else if (Interval >= 0)
+
+    for (MemMapT &EvictMemMap : EvictionMemMaps)
+      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+
+    if (Interval >= 0) {
+      // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
-    if (!EntryCached)
-      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+    }
   }
 
   bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
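
Note that store() no longer calls unmap() while holding Mutex: the MemMaps of
evicted entries are collected in EvictionMemMaps inside the critical section
and unmapped only after the do/while block, keeping a comparatively expensive
syscall out of the lock. A generic sketch of that pattern, using standard C++
names rather than scudo's types:

#include <cstddef>
#include <mutex>
#include <vector>

std::mutex M;
std::vector<int> Cache; // front = most recently used

void storeAndEvict(int Value, std::size_t MaxCount) {
  std::vector<int> Evicted; // gathered under the lock, released outside it
  {
    std::scoped_lock L(M);
    while (Cache.size() >= MaxCount) {
      Evicted.push_back(Cache.back()); // evict the least-recently-used entry
      Cache.pop_back();
    }
    Cache.insert(Cache.begin(), Value);
  }
  // Expensive cleanup (the analogue of MemMap.unmap) happens lock-free here.
  for (int V : Evicted)
    (void)V;
}
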
@@ -312,9 +334,8 @@ template <typename Config> class MapAllocatorCache {
         return false;
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = 0; I < MaxCount; I++) {
-        if (!Entries[I].isValid())
-          continue;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
@@ -347,8 +368,7 @@ template <typename Config> class MapAllocatorCache {
       }
       if (Found) {
         Entry = Entries[OptimalFitIndex];
-        Entries[OptimalFitIndex].invalidate();
-        EntriesCount--;
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
@@ -410,19 +430,17 @@ template <typename Config> class MapAllocatorCache {
 
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+    for (u32 I = 0; I != Config::getQuarantineSize(); I++) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
         Quarantine[I].invalidate();
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++) {
-      if (Entries[I].isValid()) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
     }
     QuarantinePos = -1U;
   }
@@ -434,6 +452,62 @@ template <typename Config> class MapAllocatorCache {
   void unmapTestOnly() { empty(); }
 
 private:
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
+
+    // Cache should be populated with valid entries when not empty
+    DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
+
+    u32 FreeIndex = AvailableHead;
+    AvailableHead = Entries[AvailableHead].Next;
+
+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
+    Entries[FreeIndex] = Entry;
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
+    EntriesCount++;
+
+    // Availability stack should not have available entries when all entries
+    // are in use
+    if (EntriesCount == Config::getEntriesArraySize())
+      DCHECK(AvailableHead == CachedBlock::InvalidEntry);
+  }
+
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
+    else
+      Entries[Entries[I].Prev].Next = Entries[I].Next;
+
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
+    else
+      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+
+    Entries[I].Next = AvailableHead;
+    AvailableHead = static_cast<u16>(I);
+    EntriesCount--;
+
+    // Cache should not have valid entries when empty
+    if (EntriesCount == 0) {
+      DCHECK(LRUHead == CachedBlock::InvalidEntry);
+      DCHECK(LRUTail == CachedBlock::InvalidEntry);
+    }
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
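
Both helpers are O(1): insert() pops a slot off the availability stack and
links it at the list head, while remove() unlinks an entry in place (four
cases, depending on whether it is the head and/or the tail) and pushes the
freed slot back onto the stack, so a slot released by retrieve() is
immediately reusable. A self-contained sketch of the unlink step, with
illustrative names rather than scudo's:

#include <cstdint>

namespace sketch {
constexpr uint16_t InvalidEntry = UINT16_MAX;

struct Node {
  uint16_t Next = InvalidEntry;
  uint16_t Prev = InvalidEntry;
};

Node Nodes[64];
uint16_t Head = InvalidEntry;     // most recently used
uint16_t Tail = InvalidEntry;     // least recently used
uint16_t FreeHead = InvalidEntry; // top of the availability stack

void unlinkAndFree(uint16_t I) {
  // Head case vs. interior case: patch the predecessor's Next link.
  if (I == Head)
    Head = Nodes[I].Next;
  else
    Nodes[Nodes[I].Prev].Next = Nodes[I].Next;

  // Tail case vs. interior case: patch the successor's Prev link.
  if (I == Tail)
    Tail = Nodes[I].Prev;
  else
    Nodes[Nodes[I].Next].Prev = Nodes[I].Prev;

  // Push the freed slot onto the availability stack for O(1) reuse.
  Nodes[I].Next = FreeHead;
  FreeHead = I;
}
} // namespace sketch
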
@@ -447,7 +521,6 @@ template <typename Config> class MapAllocatorCache {
         N++;
       }
       EntriesCount = 0;
-      IsFullEvents = 0;
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -484,14 +557,20 @@ template <typename Config> class MapAllocatorCache {
   atomic_u32 MaxEntriesCount = {};
   atomic_uptr MaxEntrySize = {};
   u64 OldestTime GUARDED_BY(Mutex) = 0;
-  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
 
   CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
+
+  // The LRUHead of the cache is the most recently used cache entry
+  // The LRUTail of the cache is the least recently used cache entry
+  // The AvailableHead is the top of the stack of available entries
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
 
 template <typename Config> class MapAllocator {
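
Net effect: the old store() compacted valid entries toward index 0 with an
O(MaxCount) scan and flushed the entire cache on the fifth store that found it
full (the IsFullEvents counter); the new code evicts exactly the
least-recently-used entry in O(1) and never drops the whole cache under
pressure. Every traversal (getStats(), retrieve(), disableMemoryTagging()) now
walks the entries in most-recently-used to least-recently-used order:

// Traversal shape shared by the loops above:
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
  // visit Entries[I]
}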