[scudo] Use internal list to manage the LRU cache #117946
Merged
Conversation
@llvm/pr-subscribers-compiler-rt-sanitizer

Author: None (ChiaHungDuan)

Changes: no description provided.

Full diff: https://github.com/llvm/llvm-project/pull/117946.diff

2 Files Affected:
diff --git a/compiler-rt/lib/scudo/standalone/list.h b/compiler-rt/lib/scudo/standalone/list.h
index 6b952a610e3055..c6bd32a8fa3251 100644
--- a/compiler-rt/lib/scudo/standalone/list.h
+++ b/compiler-rt/lib/scudo/standalone/list.h
@@ -61,10 +61,11 @@ template <class T> class LinkOp<T, /*LinkWithPtr=*/false> {
using LinkTy = decltype(T::Next);
LinkOp() = default;
- LinkOp(T *BaseT, uptr BaseSize) : Base(BaseT), Size(BaseSize) {}
+ // TODO: Check if the `BaseSize` can fit in `Size`.
+ LinkOp(T *BaseT, uptr BaseSize)
+ : Base(BaseT), Size(static_cast<LinkTy>(BaseSize)) {}
void init(T *LinkBase, uptr BaseSize) {
Base = LinkBase;
- // TODO: Check if the `BaseSize` can fit in `Size`.
Size = static_cast<LinkTy>(BaseSize);
}
T *getBase() const { return Base; }
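The list.h hunk above touches LinkOp<T, /*LinkWithPtr=*/false>, the variant of scudo's link helper that stores Next as a narrow integer (LinkTy = decltype(T::Next), u16 for CachedBlock) indexing into a base array rather than as a raw pointer; the patch simply moves the uptr-to-LinkTy narrowing cast, along with its TODO about overflow checking, from init() into the constructor. As a rough illustration of how such index-based links work, here is a minimal sketch; Node, IndexLinkOp, and EndOfList are hypothetical names, not scudo's actual implementation:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical element type: Next is a narrow array index, not a pointer.
struct Node {
  uint16_t Next;
  int Payload;
};

// Sketch of an index-based link helper in the spirit of
// LinkOp<T, /*LinkWithPtr=*/false>. Base/Size anchor the backing array;
// a sentinel index marks the end of the list (cf. EndOfListVal above).
struct IndexLinkOp {
  Node *Base = nullptr;
  uint16_t Size = 0;
  static constexpr uint16_t EndOfList = UINT16_MAX;

  void init(Node *B, uint16_t N) { Base = B; Size = N; }

  Node *getNext(const Node &X) const {
    return X.Next == EndOfList ? nullptr : &Base[X.Next];
  }

  void setNext(Node &X, Node *Next) const {
    if (Next == nullptr) {
      X.Next = EndOfList;
    } else {
      assert(Next >= Base && Next < Base + Size); // link must stay in-array
      X.Next = static_cast<uint16_t>(Next - Base);
    }
  }
};
```

Linking by 16-bit index instead of 64-bit pointer keeps each entry small and lets the same fixed array back multiple lists, which is what the secondary.h change below takes advantage of.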
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 2fae29e5a21687..9f6b563adf8dc3 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -71,7 +71,8 @@ namespace {
struct CachedBlock {
static constexpr u16 CacheIndexMax = UINT16_MAX;
- static constexpr u16 InvalidEntry = CacheIndexMax;
+ static constexpr scudo::uptr EndOfListVal = CacheIndexMax;
+
// We allow a certain amount of fragmentation and part of the fragmented bytes
// will be released by `releaseAndZeroPagesToOS()`. This increases the chance
// of cache hit rate and reduces the overhead to the RSS at the same time. See
@@ -206,17 +207,16 @@ class MapAllocatorCache {
&Fractional);
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
Str->append(
- "Stats: MapAllocatorCache: EntriesCount: %d, "
+ "Stats: MapAllocatorCache: EntriesCount: %zu, "
"MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
- EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+ LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
"(%zu.%02zu%%)\n",
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
- CachedBlock &Entry = Entries[I];
+ for (CachedBlock &Entry : LRUEntries) {
Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
"BlockSize: %zu %s\n",
Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
@@ -234,7 +234,7 @@ class MapAllocatorCache {
"Cache entry array is too large to be indexed.");
void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
- DCHECK_EQ(EntriesCount, 0U);
+ DCHECK_EQ(LRUEntries.size(), 0U);
setOption(Option::MaxCacheEntriesCount,
static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
setOption(Option::MaxCacheEntrySize,
@@ -244,17 +244,13 @@ class MapAllocatorCache {
ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
- // The cache is initially empty
- LRUHead = CachedBlock::InvalidEntry;
- LRUTail = CachedBlock::InvalidEntry;
-
- // Available entries will be retrieved starting from the beginning of the
- // Entries array
- AvailableHead = 0;
- for (u32 I = 0; I < Config::getEntriesArraySize() - 1; I++)
- Entries[I].Next = static_cast<u16>(I + 1);
+ LRUEntries.clear();
+ LRUEntries.init(Entries, sizeof(Entries));
- Entries[Config::getEntriesArraySize() - 1].Next = CachedBlock::InvalidEntry;
+ AvailEntries.clear();
+ AvailEntries.init(Entries, sizeof(Entries));
+ for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
+ AvailEntries.push_back(&Entries[I]);
}
void store(const Options &Options, uptr CommitBase, uptr CommitSize,
@@ -329,8 +325,9 @@ class MapAllocatorCache {
// All excess entries are evicted from the cache
while (needToEvict()) {
// Save MemMaps of evicted entries to perform unmap outside of lock
- EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
- remove(LRUTail);
+ CachedBlock *Entry = LRUEntries.back();
+ EvictionMemMaps.push_back(Entry->MemMap);
+ remove(Entry);
}
insert(Entry);
@@ -360,9 +357,9 @@ class MapAllocatorCache {
{
ScopedLock L(Mutex);
CallsToRetrieve++;
- if (EntriesCount == 0)
+ if (LRUEntries.size() == 0)
return {};
- u16 RetrievedIndex = CachedBlock::InvalidEntry;
+ CachedBlock *RetrievedEntry = nullptr;
uptr MinDiff = UINTPTR_MAX;
// Since allocation sizes don't always match cached memory chunk sizes
@@ -382,10 +379,9 @@ class MapAllocatorCache {
// well as the header metadata. If EntryHeaderPos - CommitBase exceeds
// MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
// not considered valid for retrieval.
- for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
- I = Entries[I].Next) {
- const uptr CommitBase = Entries[I].CommitBase;
- const uptr CommitSize = Entries[I].CommitSize;
+ for (CachedBlock &Entry : LRUEntries) {
+ const uptr CommitBase = Entry.CommitBase;
+ const uptr CommitSize = Entry.CommitSize;
const uptr AllocPos =
roundDown(CommitBase + CommitSize - Size, Alignment);
const uptr HeaderPos = AllocPos - HeadersSize;
@@ -408,7 +404,7 @@ class MapAllocatorCache {
continue;
MinDiff = Diff;
- RetrievedIndex = I;
+ RetrievedEntry = &Entry;
EntryHeaderPos = HeaderPos;
// Immediately use a cached block if its size is close enough to the
@@ -418,9 +414,10 @@ class MapAllocatorCache {
if (Diff <= OptimalFitThesholdBytes)
break;
}
- if (RetrievedIndex != CachedBlock::InvalidEntry) {
- Entry = Entries[RetrievedIndex];
- remove(RetrievedIndex);
+
+ if (RetrievedEntry != nullptr) {
+ Entry = *RetrievedEntry;
+ remove(RetrievedEntry);
SuccessfulRetrieves++;
}
}
@@ -499,9 +496,8 @@ class MapAllocatorCache {
Quarantine[I].invalidate();
}
}
- for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
- Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
- Entries[I].CommitSize, 0);
+ for (CachedBlock &Entry : LRUEntries) {
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
}
QuarantinePos = -1U;
}
@@ -514,63 +510,22 @@ class MapAllocatorCache {
private:
bool needToEvict() REQUIRES(Mutex) {
- return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
+ return (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount));
}
void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
- DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
-
- // Cache should be populated with valid entries when not empty
- DCHECK_NE(AvailableHead, CachedBlock::InvalidEntry);
-
- u32 FreeIndex = AvailableHead;
- AvailableHead = Entries[AvailableHead].Next;
-
- if (EntriesCount == 0) {
- LRUTail = static_cast<u16>(FreeIndex);
- } else {
- // Check list order
- if (EntriesCount > 1)
- DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
- Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
- }
-
- Entries[FreeIndex] = Entry;
- Entries[FreeIndex].Next = LRUHead;
- Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
- LRUHead = static_cast<u16>(FreeIndex);
- EntriesCount++;
+ CachedBlock *FreeEntry = AvailEntries.front();
+ AvailEntries.pop_front();
- // Availability stack should not have available entries when all entries
- // are in use
- if (EntriesCount == Config::getEntriesArraySize())
- DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
+ *FreeEntry = Entry;
+ LRUEntries.push_front(FreeEntry);
}
- void remove(uptr I) REQUIRES(Mutex) {
- DCHECK(Entries[I].isValid());
-
- Entries[I].invalidate();
-
- if (I == LRUHead)
- LRUHead = Entries[I].Next;
- else
- Entries[Entries[I].Prev].Next = Entries[I].Next;
-
- if (I == LRUTail)
- LRUTail = Entries[I].Prev;
- else
- Entries[Entries[I].Next].Prev = Entries[I].Prev;
-
- Entries[I].Next = AvailableHead;
- AvailableHead = static_cast<u16>(I);
- EntriesCount--;
-
- // Cache should not have valid entries when not empty
- if (EntriesCount == 0) {
- DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
- DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
- }
+ void remove(CachedBlock *Entry) REQUIRES(Mutex) {
+ DCHECK(Entry->isValid());
+ LRUEntries.remove(Entry);
+ Entry->invalidate();
+ AvailEntries.push_front(Entry);
}
void empty() {
@@ -578,14 +533,10 @@ class MapAllocatorCache {
uptr N = 0;
{
ScopedLock L(Mutex);
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
- if (!Entries[I].isValid())
- continue;
- MapInfo[N] = Entries[I].MemMap;
- remove(I);
- N++;
- }
- EntriesCount = 0;
+
+ for (CachedBlock &Entry : LRUEntries)
+ MapInfo[N++] = Entry.MemMap;
+ LRUEntries.clear();
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
@@ -607,7 +558,7 @@ class MapAllocatorCache {
void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
- if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+ if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
return;
OldestTime = 0;
for (uptr I = 0; I < Config::getQuarantineSize(); I++)
@@ -617,7 +568,6 @@ class MapAllocatorCache {
}
HybridMutex Mutex;
- u32 EntriesCount GUARDED_BY(Mutex) = 0;
u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
@@ -630,12 +580,9 @@ class MapAllocatorCache {
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};
- // The LRUHead of the cache is the most recently used cache entry
- u16 LRUHead GUARDED_BY(Mutex) = 0;
- // The LRUTail of the cache is the least recently used cache entry
- u16 LRUTail GUARDED_BY(Mutex) = 0;
- // The AvailableHead is the top of the stack of available entries
- u16 AvailableHead GUARDED_BY(Mutex) = 0;
+ DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
+ // The unused Entries
+ SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
};
template <typename Config> class MapAllocator {
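Taken together, the secondary.h changes delete the hand-rolled LRUHead/LRUTail/AvailableHead index bookkeeping and let two intrusive lists over the same Entries array do the work: a DoublyLinkedList of live cache entries in most-recently-used order, and a SinglyLinkedList of free slots. The following self-contained sketch mirrors that shape; Block, DList, SList, and Cache are illustrative stand-ins (linking by pointer for brevity, where scudo's lists can link by compact index), not the actual scudo classes:

```cpp
#include <cstddef>

// Hypothetical stand-ins for scudo's intrusive lists; scudo's real
// DoublyLinkedList/SinglyLinkedList (list.h) can link by compact array
// index, while this sketch links by pointer for brevity.
struct Block {
  Block *Prev = nullptr;
  Block *Next = nullptr;
  bool Valid = false;
};

// Doubly-linked list supporting the operations the patch uses:
// push_front, remove, back, size.
class DList {
  Block *Head = nullptr, *Tail = nullptr;
  std::size_t Count = 0;

public:
  std::size_t size() const { return Count; }
  Block *back() const { return Tail; }
  void push_front(Block *B) {
    B->Prev = nullptr;
    B->Next = Head;
    if (Head)
      Head->Prev = B;
    else
      Tail = B;
    Head = B;
    Count++;
  }
  void remove(Block *B) {
    if (B->Prev)
      B->Prev->Next = B->Next;
    else
      Head = B->Next;
    if (B->Next)
      B->Next->Prev = B->Prev;
    else
      Tail = B->Prev;
    Count--;
  }
};

// Singly-linked free list: push_front, front, pop_front.
class SList {
  Block *Head = nullptr;

public:
  Block *front() const { return Head; }
  void push_front(Block *B) {
    B->Next = Head;
    Head = B;
  }
  void pop_front() { Head = Head->Next; }
};

// A cache whose insert path mirrors the new store()/insert()/remove():
// evict from the back of the LRU list, take a slot from the free list,
// and push it to the front as the most recently used entry.
template <std::size_t N> class Cache {
  Block Entries[N];
  DList LRUEntries;   // live entries, most recently used first
  SList AvailEntries; // unused slots

public:
  Cache() {
    for (std::size_t I = 0; I < N; I++)
      AvailEntries.push_front(&Entries[I]);
  }
  Block *insert() {
    if (LRUEntries.size() == N) { // cache full: evict least recently used
      Block *Old = LRUEntries.back();
      LRUEntries.remove(Old);
      Old->Valid = false;
      AvailEntries.push_front(Old);
    }
    Block *Free = AvailEntries.front();
    AvailEntries.pop_front();
    Free->Valid = true;
    LRUEntries.push_front(Free);
    return Free;
  }
};
```

With the list classes owning the link maintenance, insert() and remove() in the patch collapse to a handful of front()/pop_front()/push_front()/remove() calls, and the separate EntriesCount field becomes redundant because LRUEntries.size() already tracks it.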
Force-pushed from 3205f3b to 4a8bb4b
cferris1000 approved these changes on Dec 4, 2024
LGTM