
Commit 4c6b8bb

FernandoChiaHungDuan authored and committed
[scudo] separate cache retrieval logic
Split cache::retrieve() into separate functions: one that retrieves the cached block, and another that sets the header and MTE environment. They were split so that the retrieve function can be changed more easily in the future, and so that it has the sole purpose of retrieving a CachedBlock.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D155660
1 parent 7a4968b commit 4c6b8bb

File tree

1 file changed: +45 −53 lines changed


compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 45 additions & 53 deletions
@@ -72,13 +72,20 @@ static inline void unmap(LargeBlock::Header *H) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
+namespace {
+struct CachedBlock {
+  uptr CommitBase = 0;
+  uptr CommitSize = 0;
+  uptr BlockBegin = 0;
+  MemMapT MemMap = {};
+  u64 Time = 0;
+};
+} // namespace
+
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
-                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
-    return false;
-  }
+  bool retrieve(UNUSED uptr Size, UNUSED CachedBlock &Entry) { return false; }
   void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
   bool canCache(UNUSED uptr Size) { return false; }
   void disable() {}
@@ -248,62 +255,26 @@ template <typename Config> class MapAllocatorCache {
     Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
   }
 
-  bool retrieve(Options Options, uptr Size, uptr Alignment,
-                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
-    const uptr PageSize = getPageSizeCached();
+  bool retrieve(uptr Size, CachedBlock &Entry) EXCLUDES(Mutex) {
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
     bool Found = false;
-    CachedBlock Entry;
-    uptr HeaderPos = 0;
     {
       ScopedLock L(Mutex);
       if (EntriesCount == 0)
         return false;
       for (u32 I = 0; I < MaxCount; I++) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        if (!CommitBase)
-          continue;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        HeaderPos =
-            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
-        if (HeaderPos > CommitBase + CommitSize)
+        if (!Entries[I].CommitBase)
           continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+        if (Size > Entries[I].CommitSize)
           continue;
-        }
         Found = true;
         Entry = Entries[I];
         Entries[I].CommitBase = 0;
         EntriesCount--;
         break;
       }
     }
-    if (!Found)
-      return false;
-
-    *H = reinterpret_cast<LargeBlock::Header *>(
-        LargeBlock::addHeaderTag<Config>(HeaderPos));
-    *Zeroed = Entry.Time == 0;
-    if (useMemoryTagging<Config>(Options))
-      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
-    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
-    if (useMemoryTagging<Config>(Options)) {
-      if (*Zeroed) {
-        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
-                  NewBlockBegin);
-      } else if (Entry.BlockBegin < NewBlockBegin) {
-        storeTags(Entry.BlockBegin, NewBlockBegin);
-      } else {
-        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
-      }
-    }
-    (*H)->CommitBase = Entry.CommitBase;
-    (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MemMap = Entry.MemMap;
-    return true;
+    return Found;
   }
 
   bool canCache(uptr Size) {
@@ -383,14 +354,6 @@ template <typename Config> class MapAllocatorCache {
     }
   }
 
-  struct CachedBlock {
-    uptr CommitBase = 0;
-    uptr CommitSize = 0;
-    uptr BlockBegin = 0;
-    MemMapT MemMap = {};
-    u64 Time = 0;
-  };
-
   void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
     if (!Entry.CommitBase || !Entry.Time)
       return;
@@ -476,6 +439,27 @@ template <typename Config> class MapAllocator {
     }
   }
 
+  inline void setHeader(Options Options, CachedBlock &Entry,
+                        LargeBlock::Header *H, bool &Zeroed) {
+    Zeroed = Entry.Time == 0;
+    if (useMemoryTagging<Config>(Options)) {
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+      // Block begins after the LargeBlock::Header
+      uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
+      if (Zeroed) {
+        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+                  NewBlockBegin);
+      } else if (Entry.BlockBegin < NewBlockBegin) {
+        storeTags(Entry.BlockBegin, NewBlockBegin);
+      } else {
+        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+      }
+    }
+    H->CommitBase = Entry.CommitBase;
+    H->CommitSize = Entry.CommitSize;
+    H->MemMap = Entry.MemMap;
+  }
+
   bool canCache(uptr Size) { return Cache.canCache(Size); }
 
   bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
@@ -530,7 +514,15 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
   if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
     LargeBlock::Header *H;
     bool Zeroed;
-    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+    CachedBlock Entry;
+    if (Cache.retrieve(RoundedSize, Entry)) {
+      const uptr AllocPos =
+          roundDown(Entry.CommitBase + Entry.CommitSize - Size, Alignment);
+      const uptr HeaderPos =
+          AllocPos - LargeBlock::getHeaderSize() - Chunk::getHeaderSize();
+      H = reinterpret_cast<LargeBlock::Header *>(
+          LargeBlock::addHeaderTag<Config>(HeaderPos));
+      setHeader(Options, Entry, H, Zeroed);
       const uptr BlockEnd = H->CommitBase + H->CommitSize;
       if (BlockEndPtr)
         *BlockEndPtr = BlockEnd;

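The net effect on callers: Cache.retrieve() now only finds and hands back a CachedBlock, and MapAllocator<Config>::allocate() derives the header position itself before calling the new setHeader() to populate the header (and, with MTE enabled, retag the block). Below is a minimal, self-contained sketch of that two-step pattern; the Cache, CachedBlock, and Header types are simplified stand-ins for illustration, not the actual scudo classes.

// Illustrative model only: simplified stand-ins for scudo's CachedBlock,
// LargeBlock::Header, and MapAllocatorCache, showing the split between
// "retrieve a cached block" and "set up the header from it".
#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
};

struct Header {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
};

struct Cache {
  CachedBlock Slot{0x1000, 0x4000}; // pretend a single cached mapping exists

  // Step 1: only locate and hand out a suitable cached block.
  bool retrieve(uptr Size, CachedBlock &Entry) {
    if (!Slot.CommitBase || Size > Slot.CommitSize)
      return false;
    Entry = Slot;
    Slot.CommitBase = 0; // mark the slot as consumed
    return true;
  }
};

// Step 2: populate the header from the retrieved block (stands in for
// setHeader(), without the memory-tagging work the real code performs).
void setHeader(const CachedBlock &Entry, Header &H) {
  H.CommitBase = Entry.CommitBase;
  H.CommitSize = Entry.CommitSize;
}

int main() {
  Cache C;
  CachedBlock Entry;
  if (C.retrieve(0x2000, Entry)) {
    Header H;
    setHeader(Entry, H);
    std::printf("reused block at %#zx, size %#zx\n",
                static_cast<std::size_t>(H.CommitBase),
                static_cast<std::size_t>(H.CommitSize));
  }
  return 0;
}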