
Commit 2336ef9

[scudo] Refactor store() and retrieve(). (llvm#102024)
store() and retrieve() have been refactored so that the scudo headers are abstracted away from cache operations.
1 parent 2f28378 commit 2336ef9
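
In practice, the new interface looks roughly like this from the caller's side (an illustrative sketch assembled from the signatures in the diff below, not code lifted verbatim from the commit; it assumes scudo's internal types from secondary.h such as CachedBlock, MemMapT and LargeBlock::Header):

    // Storing a freed block: the caller (MapAllocator::deallocate) now checks
    // canCache() itself and hands the cache a plain block description instead
    // of a LargeBlock::Header *.
    if (Cache.canCache(H->CommitSize)) {
      Cache.store(Options, H->CommitBase, H->CommitSize,
                  /*BlockBegin=*/reinterpret_cast<uptr>(H + 1), H->MemMap);
    } else {
      unmap(H->MemMap);
    }

    // Retrieving: the cache returns a CachedBlock by value; an invalid block
    // signals a miss. Rebuilding the LargeBlock::Header now happens in the new
    // MapAllocator::tryAllocateFromCache(), not inside the cache.
    uptr EntryHeaderPos;
    CachedBlock Entry =
        Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
    if (!Entry.isValid())
      return nullptr; // cache miss; fall back to a fresh mapping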

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 97 additions & 78 deletions
@@ -65,11 +65,7 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
 
 } // namespace LargeBlock
 
-static inline void unmap(LargeBlock::Header *H) {
-  // Note that the `H->MapMap` is stored on the pages managed by itself. Take
-  // over the ownership before unmap() so that any operation along with unmap()
-  // won't touch inaccessible pages.
-  MemMapT MemMap = H->MemMap;
+static inline void unmap(MemMapT &MemMap) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
@@ -96,12 +92,15 @@ struct CachedBlock {
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
-                UNUSED bool *Zeroed) {
-    return false;
+  CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
+                       UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
+    return {};
   }
-  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
+  void store(UNUSED Options Options, UNUSED uptr CommitBase,
+             UNUSED uptr CommitSize, UNUSED uptr BlockBegin, MemMapT MemMap) {
+    unmap(MemMap);
+  }
+
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
   void enable() {}
@@ -239,19 +238,19 @@ template <typename Config> class MapAllocatorCache {
     Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
-  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
-    if (!canCache(H->CommitSize))
-      return unmap(H);
+  void store(const Options &Options, uptr CommitBase, uptr CommitSize,
+             uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
+    DCHECK(canCache(CommitSize));
 
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
-
-    Entry.CommitBase = H->CommitBase;
-    Entry.CommitSize = H->CommitSize;
-    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
-    Entry.MemMap = H->MemMap;
+    Entry.CommitBase = CommitBase;
+    Entry.CommitSize = CommitSize;
+    Entry.BlockBegin = BlockBegin;
+    Entry.MemMap = MemMap;
     Entry.Time = UINT64_MAX;
+
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -290,7 +289,7 @@ template <typename Config> class MapAllocatorCache {
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
-        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+        unmap(Entry.MemMap);
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -321,28 +320,28 @@ template <typename Config> class MapAllocatorCache {
     } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
-      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+      unmap(EvictMemMap);
 
     if (Interval >= 0) {
       // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     }
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
+  CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
+                       uptr &EntryHeaderPos) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
     bool Found = false;
     CachedBlock Entry;
-    uptr EntryHeaderPos = 0;
+    EntryHeaderPos = 0;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
-        return false;
+        return {};
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
       for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
@@ -383,29 +382,8 @@ template <typename Config> class MapAllocatorCache {
         SuccessfulRetrieves++;
       }
     }
-    if (!Found)
-      return false;
 
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Entry;
   }
 
   bool canCache(uptr Size) {
@@ -444,7 +422,7 @@ template <typename Config> class MapAllocatorCache {
     for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
-        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+        unmap(MemMap);
         Quarantine[I].invalidate();
       }
     }
@@ -538,7 +516,7 @@ template <typename Config> class MapAllocatorCache {
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
-      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+      unmap(MemMap);
     }
   }
 
@@ -605,6 +583,9 @@ template <typename Config> class MapAllocator {
 
   void deallocate(const Options &Options, void *Ptr);
 
+  void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
+                             uptr *BlockEndPtr, FillContentsMode FillContents);
+
   static uptr getBlockEnd(void *Ptr) {
     auto *B = LargeBlock::getHeader<Config>(Ptr);
     return B->CommitBase + B->CommitSize;
@@ -665,6 +646,60 @@ template <typename Config> class MapAllocator {
   LocalStats Stats GUARDED_BY(Mutex);
 };
 
+template <typename Config>
+void *
+MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
+                                           uptr Alignment, uptr *BlockEndPtr,
+                                           FillContentsMode FillContents) {
+  CachedBlock Entry;
+  uptr EntryHeaderPos;
+
+  Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
+  if (!Entry.isValid())
+    return nullptr;
+
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
+      LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
+  bool Zeroed = Entry.Time == 0;
+  if (useMemoryTagging<Config>(Options)) {
+    uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+    if (Zeroed) {
+      storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                NewBlockBegin);
+    } else if (Entry.BlockBegin < NewBlockBegin) {
+      storeTags(Entry.BlockBegin, NewBlockBegin);
+    } else {
+      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+    }
+  }
+
+  H->CommitBase = Entry.CommitBase;
+  H->CommitSize = Entry.CommitSize;
+  H->MemMap = Entry.MemMap;
+
+  const uptr BlockEnd = H->CommitBase + H->CommitSize;
+  if (BlockEndPtr)
+    *BlockEndPtr = BlockEnd;
+  uptr HInt = reinterpret_cast<uptr>(H);
+  if (allocatorSupportsMemoryTagging<Config>())
+    HInt = untagPointer(HInt);
+  const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
+  void *Ptr = reinterpret_cast<void *>(PtrInt);
+  if (FillContents && !Zeroed)
+    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
+           BlockEnd - PtrInt);
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.push_back(H);
+    AllocatedBytes += H->CommitSize;
+    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, H->CommitSize);
+    Stats.add(StatMapped, H->MemMap.getCapacity());
+  }
+  return Ptr;
+}
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
@@ -690,32 +725,10 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
 
   if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
-    LargeBlock::Header *H;
-    bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
-                       &Zeroed)) {
-      const uptr BlockEnd = H->CommitBase + H->CommitSize;
-      if (BlockEndPtr)
-        *BlockEndPtr = BlockEnd;
-      uptr HInt = reinterpret_cast<uptr>(H);
-      if (allocatorSupportsMemoryTagging<Config>())
-        HInt = untagPointer(HInt);
-      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
-      void *Ptr = reinterpret_cast<void *>(PtrInt);
-      if (FillContents && !Zeroed)
-        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
-               BlockEnd - PtrInt);
-      {
-        ScopedLock L(Mutex);
-        InUseBlocks.push_back(H);
-        AllocatedBytes += H->CommitSize;
-        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
-        NumberOfAllocs++;
-        Stats.add(StatAllocated, H->CommitSize);
-        Stats.add(StatMapped, H->MemMap.getCapacity());
-      }
+    void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
+                                     FillContents);
+    if (Ptr != nullptr)
       return Ptr;
-    }
   }
 
   uptr RoundedSize =
@@ -740,9 +753,9 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   // In the unlikely event of alignments larger than a page, adjust the amount
   // of memory we want to commit, and trim the extra memory.
   if (UNLIKELY(Alignment >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (eg: the
-    // pointer that is returned by the C or C++ allocation APIs) ends up on a
-    // page boundary , and our headers will live in the preceding page.
+    // For alignments greater than or equal to a page, the user pointer (eg:
+    // the pointer that is returned by the C or C++ allocation APIs) ends up
+    // on a page boundary , and our headers will live in the preceding page.
     CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
     const uptr NewMapBase = CommitBase - PageSize;
     DCHECK_GE(NewMapBase, MapBase);
@@ -765,7 +778,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
   if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                             MemMap)) {
-    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+    unmap(MemMap);
     return nullptr;
   }
   const uptr HeaderPos = AllocPos - getHeadersSize();
@@ -807,7 +820,13 @@ void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
     Stats.sub(StatAllocated, CommitSize);
     Stats.sub(StatMapped, H->MemMap.getCapacity());
   }
-  Cache.store(Options, H);
+
+  if (Cache.canCache(H->CommitSize)) {
+    Cache.store(Options, H->CommitBase, H->CommitSize,
+                reinterpret_cast<uptr>(H + 1), H->MemMap);
+  } else {
+    unmap(H->MemMap);
+  }
 }
 
 template <typename Config>
