@@ -65,11 +65,7 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
 
 } // namespace LargeBlock
 
-static inline void unmap(LargeBlock::Header *H) {
-  // Note that the `H->MemMap` is stored on the pages managed by itself. Take
-  // over the ownership before unmap() so that any operation along with unmap()
-  // won't touch inaccessible pages.
-  MemMapT MemMap = H->MemMap;
+static inline void unmap(MemMapT &MemMap) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
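The deleted comment still documents a real constraint that now lives at the call sites: the large-block header is stored on the very pages its own `MemMap` describes, so the descriptor has to be copied out before those pages are released. A minimal sketch of the hazard, using simplified stand-in types rather than the real scudo ones:

```cpp
#include <cstdint>
using uptr = uintptr_t;

// Sketch only: simplified stand-ins for scudo's MemMapT and the large-block
// header, reduced to the fields that matter here.
struct MemMapT {
  uptr Base = 0, Capacity = 0;
  uptr getBase() const { return Base; }
  uptr getCapacity() const { return Capacity; }
  void unmap(uptr /*Addr*/, uptr /*Size*/) {} // releases the pages
};

struct Header {
  MemMapT MemMap; // lives on the very pages MemMap describes
};

void unmapBlock(Header *H) {
  // Copy the descriptor to the stack first: once unmap() runs, *H sits on
  // released pages and must not be read again.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
  // Touching H->MemMap here would be a use-after-unmap.
}
```

With this patch the ordering is preserved by the callers instead: `deallocate()` reads the header's fields (including `H->MemMap`) before handing the mapping to the new free-standing `unmap(MemMapT &)`.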
@@ -96,12 +92,15 @@ struct CachedBlock {
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
-                UNUSED bool *Zeroed) {
-    return false;
+  CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
+                       UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
+    return {};
   }
-  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
+  void store(UNUSED Options Options, UNUSED uptr CommitBase,
+             UNUSED uptr CommitSize, UNUSED uptr BlockBegin, MemMapT MemMap) {
+    unmap(MemMap);
+  }
+
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
   void enable() {}
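The interface change is easiest to see from the caller's side: instead of filling a `LargeBlock::Header **` and a `bool *Zeroed` out-parameter, `retrieve()` now hands back a `CachedBlock` by value, and a default-constructed block signals a miss. A compact model of that protocol, with stand-in types (the assumption here is that `CachedBlock::isValid()` reports a default-constructed entry as invalid, e.g. via its `MemMap` base):

```cpp
#include <cstdint>
using uptr = uintptr_t;
using u64 = uint64_t;

// Stand-ins for the types in the patch, trimmed to the miss/hit protocol.
struct MemMapT {
  uptr Base = 0;
  uptr getBase() const { return Base; }
};
struct CachedBlock {
  uptr CommitBase = 0, CommitSize = 0, BlockBegin = 0;
  MemMapT MemMap;
  u64 Time = 0;
  bool isValid() const { return MemMap.getBase() != 0; } // default == miss
};

// Mirrors MapAllocatorNoCache::retrieve(): no out-pointers; an invalid
// (default-constructed) CachedBlock is the miss signal.
CachedBlock retrieve(uptr /*Size*/, uptr /*Alignment*/, uptr /*HeadersSize*/,
                     uptr &EntryHeaderPos) {
  EntryHeaderPos = 0;
  return {};
}
```

This keeps the no-cache and caching implementations signature-compatible while moving all header reconstruction out of the cache.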
@@ -239,19 +238,19 @@ template <typename Config> class MapAllocatorCache {
     Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
-  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
-    if (!canCache(H->CommitSize))
-      return unmap(H);
+  void store(const Options &Options, uptr CommitBase, uptr CommitSize,
+             uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
+    DCHECK(canCache(CommitSize));
 
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
-
-    Entry.CommitBase = H->CommitBase;
-    Entry.CommitSize = H->CommitSize;
-    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
-    Entry.MemMap = H->MemMap;
+    Entry.CommitBase = CommitBase;
+    Entry.CommitSize = CommitSize;
+    Entry.BlockBegin = BlockBegin;
+    Entry.MemMap = MemMap;
     Entry.Time = UINT64_MAX;
+
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -290,7 +289,7 @@ template <typename Config> class MapAllocatorCache {
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
-        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+        unmap(Entry.MemMap);
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -321,28 +320,28 @@ template <typename Config> class MapAllocatorCache {
     } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
-      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+      unmap(EvictMemMap);
 
     if (Interval >= 0) {
       // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     }
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
+  CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
+                       uptr &EntryHeaderPos) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
     bool Found = false;
     CachedBlock Entry;
-    uptr EntryHeaderPos = 0;
+    EntryHeaderPos = 0;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
-        return false;
+        return {};
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
       for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
@@ -383,29 +382,8 @@ template <typename Config> class MapAllocatorCache {
         SuccessfulRetrieves++;
       }
     }
-    if (!Found)
-      return false;
 
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Entry;
   }
 
   bool canCache(uptr Size) {
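After this hunk, the cache's entire job under `Mutex` is the best-fit search hinted at by the `FragmentedBytesDivisor` comment; tag management and header writes happen afterwards in `MapAllocator<Config>::tryAllocateFromCache()`. A simplified, self-contained version of that fit policy (a plain array instead of the intrusive LRU list, and without the alignment and header-placement checks the real loop also applies):

```cpp
#include <cstdint>
using uptr = uintptr_t;
using u32 = uint32_t;

// Sketch of the fit policy: a block is usable if its committed range fits
// the request; among usable blocks, prefer the smallest waste, and stop
// early once waste drops below ~10% of the request
// (FragmentedBytesDivisor == 10 in the patch).
struct Block { uptr CommitBase, CommitSize; };

int findOptimalFit(const Block *Entries, u32 N, uptr RequiredSize) {
  constexpr u32 FragmentedBytesDivisor = 10;
  int OptimalFitIndex = -1;
  uptr MinDiff = UINTPTR_MAX;
  for (u32 I = 0; I < N; I++) {
    if (Entries[I].CommitSize < RequiredSize)
      continue; // does not fit
    const uptr Diff = Entries[I].CommitSize - RequiredSize;
    if (Diff >= MinDiff)
      continue; // not an improvement
    OptimalFitIndex = static_cast<int>(I);
    MinDiff = Diff;
    if (Diff < RequiredSize / FragmentedBytesDivisor)
      break; // good enough: waste under ~10% of the request
  }
  return OptimalFitIndex;
}
```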
@@ -444,7 +422,7 @@ template <typename Config> class MapAllocatorCache {
     for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
-        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+        unmap(MemMap);
         Quarantine[I].invalidate();
       }
     }
@@ -538,7 +516,7 @@ template <typename Config> class MapAllocatorCache {
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
-      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+      unmap(MemMap);
     }
   }
 
@@ -605,6 +583,9 @@ template <typename Config> class MapAllocator {
 
   void deallocate(const Options &Options, void *Ptr);
 
+  void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
+                             uptr *BlockEndPtr, FillContentsMode FillContents);
+
   static uptr getBlockEnd(void *Ptr) {
     auto *B = LargeBlock::getHeader<Config>(Ptr);
     return B->CommitBase + B->CommitSize;
@@ -665,6 +646,60 @@ template <typename Config> class MapAllocator {
   LocalStats Stats GUARDED_BY(Mutex);
 };
 
+template <typename Config>
+void *
+MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
+                                           uptr Alignment, uptr *BlockEndPtr,
+                                           FillContentsMode FillContents) {
+  CachedBlock Entry;
+  uptr EntryHeaderPos;
+
+  Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
+  if (!Entry.isValid())
+    return nullptr;
+
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
+      LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
+  bool Zeroed = Entry.Time == 0;
+  if (useMemoryTagging<Config>(Options)) {
+    uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+    if (Zeroed) {
+      storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                NewBlockBegin);
+    } else if (Entry.BlockBegin < NewBlockBegin) {
+      storeTags(Entry.BlockBegin, NewBlockBegin);
+    } else {
+      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+    }
+  }
+
+  H->CommitBase = Entry.CommitBase;
+  H->CommitSize = Entry.CommitSize;
+  H->MemMap = Entry.MemMap;
+
+  const uptr BlockEnd = H->CommitBase + H->CommitSize;
+  if (BlockEndPtr)
+    *BlockEndPtr = BlockEnd;
+  uptr HInt = reinterpret_cast<uptr>(H);
+  if (allocatorSupportsMemoryTagging<Config>())
+    HInt = untagPointer(HInt);
+  const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
+  void *Ptr = reinterpret_cast<void *>(PtrInt);
+  if (FillContents && !Zeroed)
+    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
+           BlockEnd - PtrInt);
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.push_back(H);
+    AllocatedBytes += H->CommitSize;
+    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, H->CommitSize);
+    Stats.add(StatMapped, H->MemMap.getCapacity());
+  }
+  return Ptr;
+}
+
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
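The pointer arithmetic in the new function is compact, so a worked example with made-up numbers may help (all values below are invented for illustration; `0x20` stands in for the real combined header size, not scudo's actual `getHeadersSize()`):

```cpp
#include <cstdint>
#include <cstdio>
using uptr = uintptr_t;

int main() {
  // Invented example values.
  const uptr HeaderSize = 0x20;        // assumed combined header size
  const uptr CommitBase = 0x10000;     // start of the committed range
  const uptr CommitSize = 0x4000;      // committed bytes
  const uptr EntryHeaderPos = 0x10FE0; // position retrieve() reported

  const uptr H = EntryHeaderPos;       // header is rebuilt here
  const uptr Ptr = H + HeaderSize;     // user pointer follows the header
  const uptr BlockEnd = CommitBase + CommitSize;

  // FillContents only needs to scrub [Ptr, BlockEnd): the bytes handed to
  // the user; the header itself is always rewritten.
  std::printf("user block [%#lx, %#lx), %lu usable bytes\n",
              static_cast<unsigned long>(Ptr),
              static_cast<unsigned long>(BlockEnd),
              static_cast<unsigned long>(BlockEnd - Ptr));
  return 0;
}
```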
@@ -690,32 +725,10 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
 
   if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
-    LargeBlock::Header *H;
-    bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
-                       &Zeroed)) {
-      const uptr BlockEnd = H->CommitBase + H->CommitSize;
-      if (BlockEndPtr)
-        *BlockEndPtr = BlockEnd;
-      uptr HInt = reinterpret_cast<uptr>(H);
-      if (allocatorSupportsMemoryTagging<Config>())
-        HInt = untagPointer(HInt);
-      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
-      void *Ptr = reinterpret_cast<void *>(PtrInt);
-      if (FillContents && !Zeroed)
-        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
-               BlockEnd - PtrInt);
-      {
-        ScopedLock L(Mutex);
-        InUseBlocks.push_back(H);
-        AllocatedBytes += H->CommitSize;
-        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
-        NumberOfAllocs++;
-        Stats.add(StatAllocated, H->CommitSize);
-        Stats.add(StatMapped, H->MemMap.getCapacity());
-      }
+    void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
+                                     FillContents);
+    if (Ptr != nullptr)
       return Ptr;
-    }
   }
 
   uptr RoundedSize =
@@ -740,9 +753,9 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   // In the unlikely event of alignments larger than a page, adjust the amount
   // of memory we want to commit, and trim the extra memory.
   if (UNLIKELY(Alignment >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (eg: the
-    // pointer that is returned by the C or C++ allocation APIs) ends up on a
-    // page boundary, and our headers will live in the preceding page.
+    // For alignments greater than or equal to a page, the user pointer (eg:
+    // the pointer that is returned by the C or C++ allocation APIs) ends up
+    // on a page boundary, and our headers will live in the preceding page.
     CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
     const uptr NewMapBase = CommitBase - PageSize;
     DCHECK_GE(NewMapBase, MapBase);
@@ -765,7 +778,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
   if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                             MemMap)) {
-    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+    unmap(MemMap);
     return nullptr;
   }
   const uptr HeaderPos = AllocPos - getHeadersSize();
@@ -807,7 +820,13 @@ void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
     Stats.sub(StatAllocated, CommitSize);
     Stats.sub(StatMapped, H->MemMap.getCapacity());
   }
-  Cache.store(Options, H);
+
+  if (Cache.canCache(H->CommitSize)) {
+    Cache.store(Options, H->CommitBase, H->CommitSize,
+                reinterpret_cast<uptr>(H + 1), H->MemMap);
+  } else {
+    unmap(H->MemMap);
+  }
 }
 
 template <typename Config>
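Viewed end to end, the patch moves the cache-or-unmap decision out of `store()` and into `deallocate()`, which is what lets `store()` assert `canCache()` unconditionally. A minimal model of the new contract, with stand-in types rather than the real scudo ones (plain `assert()` in place of `DCHECK`):

```cpp
#include <cassert>
#include <cstdint>
using uptr = uintptr_t;

// Stand-in for the mapping descriptor.
struct MemMapT { uptr Base = 0, Capacity = 0; };

struct Cache {
  uptr MaxCachedSize = 1 << 20; // invented threshold
  bool canCache(uptr Size) const { return Size <= MaxCachedSize; }
  void store(uptr CommitSize, MemMapT /*MemMap*/) {
    assert(canCache(CommitSize)); // caller already screened, as in the patch
    // ... insert into the cache, taking ownership of the mapping ...
  }
};

void unmap(MemMapT & /*MemMap*/) { /* release the pages */ }

void releaseBlock(Cache &C, uptr CommitSize, MemMapT MemMap) {
  if (C.canCache(CommitSize))
    C.store(CommitSize, MemMap); // cache keeps the mapping alive
  else
    unmap(MemMap); // too big to cache: release immediately
}
```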