@@ -184,6 +184,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
 class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
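
The ListInfo pairs added here are the entire per-list state: each list is an intrusive doubly-linked list threaded through the shared Entries array, with u16 indices standing in for pointers (hence the TODO about the intrusive list's link type). As a rough standalone illustration of that layout — Node, List, and kInvalid are invented names, and uint16_t stands in for scudo's u16 — a push-to-front over index links might look like:

#include <cstdint>
#include <cstdio>

// Sentinel index playing the role of CachedBlock::InvalidEntry.
constexpr uint16_t kInvalid = UINT16_MAX;

struct Node {
  uint16_t Prev = kInvalid;
  uint16_t Next = kInvalid;
  int Value = 0;
};

// Head/Tail pair, analogous to ListInfo above.
struct List {
  uint16_t Head = kInvalid;
  uint16_t Tail = kInvalid;
};

// Push Nodes[I] onto the front of L; same shape as the patch's pushFront().
void pushFront(Node *Nodes, List &L, uint16_t I) {
  if (L.Tail == kInvalid)
    L.Tail = I; // first element becomes both head and tail
  else
    Nodes[L.Head].Prev = I;
  Nodes[I].Next = L.Head;
  Nodes[I].Prev = kInvalid;
  L.Head = I;
}

int main() {
  Node Nodes[4];
  List L;
  for (uint16_t I = 0; I < 4; I++) {
    Nodes[I].Value = I;
    pushFront(Nodes, L, I);
  }
  // Most recent push sits at the head: prints "3 2 1 0".
  for (uint16_t I = L.Head; I != kInvalid; I = Nodes[I].Next)
    printf("%d ", Nodes[I].Value);
  printf("\n");
}

Keeping links as 16-bit indices into fixed storage halves the size of each link relative to pointers and lets an entry move between lists without any allocation.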
@@ -201,13 +209,18 @@ class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -231,8 +244,10 @@ class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -250,7 +265,6 @@ class MapAllocatorCache {
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
-
     Entry.CommitBase = CommitBase;
     Entry.CommitSize = CommitSize;
     Entry.BlockBegin = BlockBegin;
@@ -314,16 +328,22 @@ class MapAllocatorCache {
 
       // All excess entries are evicted from the cache
       while (needToEvict()) {
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        EvictionMemMaps.push_back(
+            Entries[EntryLists[EvictionListType].Tail].MemMap);
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }
 
-      insert(Entry);
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       unmapCallBack(EvictMemMap);
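
Eviction now drains the DECOMMITTED list first: those entries' pages have already been returned to the OS, so losing them forfeits less reusable work than losing a still-committed entry, and the COMMITTED list is only touched when no decommitted entry remains. A minimal sketch of just that branch, using an invented List type rather than scudo's:

#include <cassert>
#include <cstdint>

enum EntryListT { COMMITTED = 0, DECOMMITTED = 1 };
constexpr uint16_t kInvalid = UINT16_MAX;

struct List {
  uint16_t Head = kInvalid;
  uint16_t Tail = kInvalid;
};

// Mirrors the branch in store(): fall back to COMMITTED only when the
// DECOMMITTED list has nothing left to evict.
EntryListT pickEvictionList(const List EntryLists[2]) {
  return EntryLists[DECOMMITTED].Tail == kInvalid ? COMMITTED : DECOMMITTED;
}

int main() {
  List EntryLists[2];
  assert(pickEvictionList(EntryLists) == COMMITTED); // both lists empty
  EntryLists[DECOMMITTED].Tail = 0;                  // one decommitted entry
  assert(pickEvictionList(EntryLists) == DECOMMITTED);
}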
@@ -340,17 +360,14 @@ class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
+    uptr MinDiff = UINTPTR_MAX;
+    EntryListT OptimalFitListType = NONE;
     EntryHeaderPos = 0;
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return {};
-      u32 OptimalFitIndex = 0;
-      uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+
+    auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
            I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
@@ -360,34 +377,48 @@ class MapAllocatorCache {
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
           continue;
-        }
-        Found = true;
+
         const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
+        // immediately use a cached block if its size is close enough to
+        // the requested size.
         const uptr MaxAllowedFragmentedBytes =
             (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
         if (Diff <= MaxAllowedFragmentedBytes) {
           OptimalFitIndex = I;
           EntryHeaderPos = HeaderPos;
-          break;
+          OptimalFitListType = ListType;
+          return true;
         }
+
         // keep track of the smallest cached block
         // that is greater than (AllocSize + HeaderSize)
         if (Diff > MinDiff)
           continue;
         OptimalFitIndex = I;
         MinDiff = Diff;
+        OptimalFitListType = ListType;
         EntryHeaderPos = HeaderPos;
       }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
+      return (OptimalFitIndex != CachedBlock::InvalidEntry);
+    };
+
+    {
+      ScopedLock L(Mutex);
+      CallsToRetrieve++;
+      if (EntriesCount == 0)
+        return {};
+
+      // Prioritize a valid fit from COMMITTED entries over
+      // an optimal fit from DECOMMITTED entries
+      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+        return {};
+
+      Entry = Entries[OptimalFitIndex];
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);
 
     return Entry;
   }
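
The retrieval scan is now a lambda invoked per list: any usable fit found in COMMITTED wins outright, and DECOMMITTED is searched only when the first pass comes up empty. Within one list, a block whose wasted prefix stays under one tenth of its usable span is taken immediately; otherwise the scan tracks the least-wasteful candidate. A self-contained sketch of that policy over plain size arrays — names are invented, and the real code compares CommitBase/HeaderPos addresses rather than raw sizes:

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t FragmentedBytesDivisor = 10;
constexpr size_t kNotFound = SIZE_MAX;

// Scan one list of block sizes for Need bytes. Returns the chosen index or
// kNotFound. Early-exits when the wasted tail is within 10% of the block,
// mirroring the MaxAllowedFragmentedBytes check above.
size_t findAvailableEntry(const std::vector<size_t> &Sizes, size_t Need) {
  size_t OptimalFitIndex = kNotFound;
  size_t MinDiff = SIZE_MAX;
  for (size_t I = 0; I < Sizes.size(); I++) {
    if (Sizes[I] < Need)
      continue; // does not fit at all
    const size_t Diff = Sizes[I] - Need;
    if (Diff <= Sizes[I] / FragmentedBytesDivisor)
      return I; // close enough: take it immediately
    if (Diff > MinDiff)
      continue;
    OptimalFitIndex = I; // best (least wasteful) fit so far
    MinDiff = Diff;
  }
  return OptimalFitIndex;
}

int main() {
  const std::vector<size_t> Committed = {4096, 65536};
  const std::vector<size_t> Decommitted = {8192};
  // Any COMMITTED fit is preferred; DECOMMITTED is scanned only on a miss.
  size_t I = findAvailableEntry(Committed, 6000);
  if (I == kNotFound)
    I = findAvailableEntry(Decommitted, 6000);
  printf("chose index %zu\n", I); // index 1 (65536), despite the waste
}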
@@ -432,10 +463,15 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }
 
@@ -450,7 +486,7 @@ class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -459,66 +495,86 @@ class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;
 
+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
+
+  // Invalidates Entries[I], removes Entries[I] from its list, and pushes
+  // Entries[I] onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
 
+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;
 
     // Cache should not have valid entries when empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }
 
+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
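
Alongside the two entry lists, remove() recycles a slot by invalidating it, unlinking it, and pushing its index onto the availability stack, which reuses the same Next links as a singly-linked free list. A small standalone sketch of that free-stack idiom (Slot, FreeStack, and kInvalid are invented names; uint16_t stands in for scudo's u16):

#include <cassert>
#include <cstdint>

constexpr uint16_t kInvalid = UINT16_MAX;

struct Slot {
  uint16_t Next = kInvalid;
  bool Valid = false;
};

struct FreeStack {
  Slot *Slots;
  uint16_t AvailableHead = kInvalid;

  // Pop an available index, as insert() does with AvailableHead.
  uint16_t pop() {
    assert(AvailableHead != kInvalid);
    const uint16_t I = AvailableHead;
    AvailableHead = Slots[I].Next;
    Slots[I].Valid = true;
    return I;
  }

  // Push index I back, as remove() does after unlinking the entry.
  void push(uint16_t I) {
    Slots[I].Valid = false;
    Slots[I].Next = AvailableHead;
    AvailableHead = I;
  }
};

int main() {
  Slot Slots[3];
  FreeStack S{Slots};
  // Seed the stack with every index, mirroring the cache's init().
  for (uint16_t I = 3; I-- > 0;)
    S.push(I);
  const uint16_t A = S.pop(); // takes 0, the last index pushed
  assert(A == 0);
  S.push(A);
  assert(S.AvailableHead == 0);
}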
@@ -545,8 +601,14 @@ class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;) {
+      // Capture the successor first: pushFront() rewrites Entries[I].Next
+      // when the entry migrates to the DECOMMITTED list.
+      const u16 Next = Entries[I].Next;
+      if (Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+      I = Next;
+    }
   }
 
   HybridMutex Mutex;
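
Here the aging pass walks only the COMMITTED list and migrates each released entry to DECOMMITTED with unlink() plus pushFront(), saving the successor index before the node's links are rewritten. A self-contained sketch of that migrate-between-lists idiom, under the same invented Node/List shapes as the first sketch (not scudo's types, which carry CachedBlock state and run under Mutex):

#include <cassert>
#include <cstdint>

constexpr uint16_t kInvalid = UINT16_MAX;

struct Node {
  uint16_t Prev = kInvalid;
  uint16_t Next = kInvalid;
  uint64_t Time = 0;
};

struct List {
  uint16_t Head = kInvalid;
  uint16_t Tail = kInvalid;
};

void pushFront(Node *Nodes, List &L, uint16_t I) {
  if (L.Tail == kInvalid)
    L.Tail = I;
  else
    Nodes[L.Head].Prev = I;
  Nodes[I].Next = L.Head;
  Nodes[I].Prev = kInvalid;
  L.Head = I;
}

void unlink(Node *Nodes, List &L, uint16_t I) {
  if (I == L.Head)
    L.Head = Nodes[I].Next;
  else
    Nodes[Nodes[I].Prev].Next = Nodes[I].Next;
  if (I == L.Tail)
    L.Tail = Nodes[I].Prev;
  else
    Nodes[Nodes[I].Next].Prev = Nodes[I].Prev;
}

// Move nodes stamped at or before Cutoff from Src to Dst. The successor is
// captured before unlink/pushFront rewrite the node's links.
void migrateOlderThan(Node *Nodes, List &Src, List &Dst, uint64_t Cutoff) {
  for (uint16_t I = Src.Head; I != kInvalid;) {
    const uint16_t Next = Nodes[I].Next;
    if (Nodes[I].Time != 0 && Nodes[I].Time <= Cutoff) {
      unlink(Nodes, Src, I);
      pushFront(Nodes, Dst, I);
      Nodes[I].Time = 0; // mark as released, like a decommitted entry
    }
    I = Next;
  }
}

int main() {
  Node Nodes[3];
  List Committed, Decommitted;
  for (uint16_t I = 0; I < 3; I++) {
    Nodes[I].Time = 100 * (I + 1); // Times 100, 200, 300
    pushFront(Nodes, Committed, I);
  }
  migrateOlderThan(Nodes, Committed, Decommitted, 200);
  // Nodes 0 and 1 (Times 100, 200) moved; node 2 stays committed.
  assert(Committed.Head == 2 && Committed.Tail == 2);
  assert(Decommitted.Head != kInvalid);
}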
@@ -563,10 +625,12 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries have memory chunks that have not been released to the OS
+  // DECOMMITTED entries have memory chunks that have been released to the OS
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
@@ -706,6 +770,7 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
   }
   return Ptr;
 }
+
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger