Commit 68f469b

[scudo] Refactor store() and retrieve().
store() and retrieve() have been refactored so that the scudo headers are abstracted away from low-level cache operations.
Parent: 30b5d4a
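
To make the refactored contract concrete, below is a minimal standalone sketch. It uses assumed stand-in types (ToyCache, a two-field MemMapT, and the releaseBlock helper are placeholders for illustration, not the real scudo classes): store() now receives the raw commit base/size, the block begin, and the MemMapT instead of a LargeBlock::Header, and retrieve() signals a hit by returning a valid CachedBlock and reporting the header position through an out-parameter.

// Standalone sketch with assumed stand-in types; not the real scudo classes.
// The cache works purely on raw commit ranges and MemMaps, and a miss is
// signalled by an invalid CachedBlock.
#include <cstdint>
#include <vector>

using uptr = uintptr_t;

struct MemMapT {                // placeholder for scudo's MemMapT
  uptr Base = 0;
  uptr Capacity = 0;
};

struct CachedBlock {            // plain record; no LargeBlock::Header knowledge
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap;
  bool isValid() const { return CommitBase != 0; }
};

struct ToyCache {
  // Arbitrary 1 MiB cap for the sketch.
  bool canCache(uptr Size) const { return Size <= (1u << 20); }

  // store() takes the raw pieces; callers check canCache() first (the real
  // store() DCHECKs it).
  void store(uptr CommitBase, uptr CommitSize, uptr BlockBegin,
             MemMapT MemMap) {
    Entries.push_back({CommitBase, CommitSize, BlockBegin, MemMap});
  }

  // retrieve() returns an invalid CachedBlock on a miss and reports the
  // header position through the out-parameter.
  CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
                       uptr &EntryHeaderPos) {
    (void)Alignment;            // the toy first-fit search ignores alignment
    for (size_t I = 0; I < Entries.size(); ++I) {
      if (Entries[I].CommitSize >= Size + HeadersSize) {
        CachedBlock Entry = Entries[I];
        Entries.erase(Entries.begin() + static_cast<long>(I));
        EntryHeaderPos = Entry.CommitBase; // simplified header placement
        return Entry;
      }
    }
    return {};
  }

  std::vector<CachedBlock> Entries;
};

// Caller side, mirroring the shape of deallocate() after the refactor: check
// canCache() up front, unmap on the miss path, otherwise hand the raw pieces
// to store().
void releaseBlock(ToyCache &Cache, uptr CommitBase, uptr CommitSize,
                  uptr BlockBegin, MemMapT MemMap) {
  if (!Cache.canCache(CommitSize)) {
    // The real code calls unmap(MemMap) here.
    return;
  }
  Cache.store(CommitBase, CommitSize, BlockBegin, MemMap);
}

The diff below applies the same split to the real code: deallocate() performs the canCache() check and unmaps on the miss path, allocate() defers to the new tryAllocateFromCache(), and tryAllocateFromCache() rebuilds the LargeBlock::Header from the CachedBlock that the cache returns.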

File tree

1 file changed, +98 -78 lines


compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 98 additions & 78 deletions
@@ -65,11 +65,7 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
 
 } // namespace LargeBlock
 
-static inline void unmap(LargeBlock::Header *H) {
-  // Note that the `H->MapMap` is stored on the pages managed by itself. Take
-  // over the ownership before unmap() so that any operation along with unmap()
-  // won't touch inaccessible pages.
-  MemMapT MemMap = H->MemMap;
+static inline void unmap(MemMapT &MemMap) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
@@ -96,12 +92,15 @@ struct CachedBlock {
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
-                UNUSED bool *Zeroed) {
-    return false;
+  CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
+                       UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
+    return {};
   }
-  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
+  void store(UNUSED Options Options, UNUSED uptr CommitBase,
+             UNUSED uptr CommitSize, UNUSED uptr BlockBegin, MemMapT MemMap) {
+    unmap(MemMap);
+  }
+
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
   void enable() {}
@@ -239,19 +238,19 @@ template <typename Config> class MapAllocatorCache {
     Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
   }
 
-  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
-    if (!canCache(H->CommitSize))
-      return unmap(H);
+  void store(const Options &Options, uptr CommitBase, uptr CommitSize,
+             uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
+    DCHECK(canCache(CommitSize));
 
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
-
-    Entry.CommitBase = H->CommitBase;
-    Entry.CommitSize = H->CommitSize;
-    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
-    Entry.MemMap = H->MemMap;
+    Entry.CommitBase = CommitBase;
+    Entry.CommitSize = CommitSize;
+    Entry.BlockBegin = BlockBegin;
+    Entry.MemMap = MemMap;
     Entry.Time = UINT64_MAX;
+
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
         // Release the memory and make it inaccessible at the same time by
@@ -290,7 +289,7 @@ template <typename Config> class MapAllocatorCache {
         // read Options and when we locked Mutex. We can't insert our entry into
         // the quarantine or the cache because the permissions would be wrong so
         // just unmap it.
-        Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+        unmap(Entry.MemMap);
         break;
       }
       if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -321,28 +320,28 @@ template <typename Config> class MapAllocatorCache {
     } while (0);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
-      EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
+      unmap(EvictMemMap);
 
     if (Interval >= 0) {
       // TODO: Add ReleaseToOS logic to LRU algorithm
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     }
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
+  CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
+                       uptr &EntryHeaderPos) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
     bool Found = false;
     CachedBlock Entry;
-    uptr EntryHeaderPos = 0;
+    EntryHeaderPos = 0;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
-        return false;
+        return {};
       u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
       for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
@@ -383,29 +382,8 @@ template <typename Config> class MapAllocatorCache {
         SuccessfulRetrieves++;
       }
     }
-    if (!Found)
-      return false;
 
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Entry;
   }
 
   bool canCache(uptr Size) {
@@ -444,7 +422,7 @@ template <typename Config> class MapAllocatorCache {
     for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
-        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+        unmap(MemMap);
         Quarantine[I].invalidate();
       }
     }
@@ -538,7 +516,7 @@ template <typename Config> class MapAllocatorCache {
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
-      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+      unmap(MemMap);
     }
   }
 
@@ -605,6 +583,9 @@ template <typename Config> class MapAllocator {
 
   void deallocate(const Options &Options, void *Ptr);
 
+  void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
+                             uptr *BlockEndPtr, FillContentsMode FillContents);
+
   static uptr getBlockEnd(void *Ptr) {
     auto *B = LargeBlock::getHeader<Config>(Ptr);
     return B->CommitBase + B->CommitSize;
@@ -665,6 +646,61 @@ template <typename Config> class MapAllocator {
   LocalStats Stats GUARDED_BY(Mutex);
 };
 
+template <typename Config>
+void *
+MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
+                                           uptr Alignment, uptr *BlockEndPtr,
+                                           FillContentsMode FillContents) {
+  CachedBlock Entry;
+  uptr EntryHeaderPos;
+
+  Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
+  if (!Entry.isValid())
+    return nullptr;
+
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
+      LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
+  bool Zeroed = Entry.Time == 0;
+  if (useMemoryTagging<Config>(Options))
+    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+  uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+  if (useMemoryTagging<Config>(Options)) {
+    if (Zeroed) {
+      storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                NewBlockBegin);
+    } else if (Entry.BlockBegin < NewBlockBegin) {
+      storeTags(Entry.BlockBegin, NewBlockBegin);
+    } else {
+      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+    }
+  }
+
+  H->CommitBase = Entry.CommitBase;
+  H->CommitSize = Entry.CommitSize;
+  H->MemMap = Entry.MemMap;
+
+  const uptr BlockEnd = H->CommitBase + H->CommitSize;
+  if (BlockEndPtr)
+    *BlockEndPtr = BlockEnd;
+  uptr HInt = reinterpret_cast<uptr>(H);
+  if (allocatorSupportsMemoryTagging<Config>())
+    HInt = untagPointer(HInt);
+  const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
+  void *Ptr = reinterpret_cast<void *>(PtrInt);
+  if (FillContents && !Zeroed)
+    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
+           BlockEnd - PtrInt);
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.push_back(H);
+    AllocatedBytes += H->CommitSize;
+    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, H->CommitSize);
+    Stats.add(StatMapped, H->MemMap.getCapacity());
+  }
+  return Ptr;
+}
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
@@ -690,32 +726,10 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
 
   if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
-    LargeBlock::Header *H;
-    bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
-                       &Zeroed)) {
-      const uptr BlockEnd = H->CommitBase + H->CommitSize;
-      if (BlockEndPtr)
-        *BlockEndPtr = BlockEnd;
-      uptr HInt = reinterpret_cast<uptr>(H);
-      if (allocatorSupportsMemoryTagging<Config>())
-        HInt = untagPointer(HInt);
-      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
-      void *Ptr = reinterpret_cast<void *>(PtrInt);
-      if (FillContents && !Zeroed)
-        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
-               BlockEnd - PtrInt);
-      {
-        ScopedLock L(Mutex);
-        InUseBlocks.push_back(H);
-        AllocatedBytes += H->CommitSize;
-        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
-        NumberOfAllocs++;
-        Stats.add(StatAllocated, H->CommitSize);
-        Stats.add(StatMapped, H->MemMap.getCapacity());
-      }
+    void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
+                                     FillContents);
+    if (Ptr != nullptr)
       return Ptr;
-    }
   }
 
   uptr RoundedSize =
@@ -740,9 +754,9 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   // In the unlikely event of alignments larger than a page, adjust the amount
   // of memory we want to commit, and trim the extra memory.
   if (UNLIKELY(Alignment >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (eg: the
-    // pointer that is returned by the C or C++ allocation APIs) ends up on a
-    // page boundary , and our headers will live in the preceding page.
+    // For alignments greater than or equal to a page, the user pointer (eg:
+    // the pointer that is returned by the C or C++ allocation APIs) ends up
+    // on a page boundary , and our headers will live in the preceding page.
     CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
     const uptr NewMapBase = CommitBase - PageSize;
     DCHECK_GE(NewMapBase, MapBase);
@@ -765,7 +779,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
   if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                             MemMap)) {
-    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+    unmap(MemMap);
     return nullptr;
   }
   const uptr HeaderPos = AllocPos - getHeadersSize();
@@ -807,7 +821,13 @@ void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
     Stats.sub(StatAllocated, CommitSize);
     Stats.sub(StatMapped, H->MemMap.getCapacity());
   }
-  Cache.store(Options, H);
+
+  if (!Cache.canCache(H->CommitSize)) {
+    unmap(H->MemMap);
+  } else {
+    Cache.store(Options, H->CommitBase, H->CommitSize,
+                reinterpret_cast<uptr>(H + 1), H->MemMap);
+  }
 }
 
 template <typename Config>
