Skip to content

Commit 134cb88

Browse files
authored
[scudo] Use a tryLock in secondary release to OS (#132827)
In the caching part of the secondary path, when about to try to release memory to the OS, we always wait while acquiring the lock. However, if multiple threads are attempting this at the same time, all other threads will likely do nothing when the release call is made. Change the algorithm to skip the release if there is another release in progress. Also, pull the lock outside the releaseOlderThan function. This way, in the store path, we use tryLock and skip if another thread is already releasing; but in the path where a forced release call is being made, that call waits for the release to complete, which guarantees that all entries are released when requested.
1 parent c995db3 commit 134cb88

File tree

1 file changed

+24
-10
lines changed

1 file changed

+24
-10
lines changed

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 24 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -206,11 +206,13 @@ class MapAllocatorCache {
206206
computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
207207
&Fractional);
208208
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
209-
Str->append(
210-
"Stats: MapAllocatorCache: EntriesCount: %zu, "
211-
"MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
212-
LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
213-
atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
209+
Str->append("Stats: MapAllocatorCache: EntriesCount: %zu, "
210+
"MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsSkips: "
211+
"%zu, ReleaseToOsIntervalMs = %d\n",
212+
LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
213+
atomic_load_relaxed(&MaxEntrySize),
214+
atomic_load_relaxed(&ReleaseToOsSkips),
215+
Interval >= 0 ? Interval : -1);
214216
Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
215217
"(%zu.%02zu%%)\n",
216218
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
@@ -343,8 +345,15 @@ class MapAllocatorCache {
343345
unmapCallBack(EvictMemMap);
344346

345347
if (Interval >= 0) {
346-
// TODO: Add ReleaseToOS logic to LRU algorithm
347-
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
348+
// It is very likely that multiple threads trying to do a release at the
349+
// same time will not actually release any extra elements. Therefore,
350+
// let any other thread continue, skipping the release.
351+
if (Mutex.tryLock()) {
352+
// TODO: Add ReleaseToOS logic to LRU algorithm
353+
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
354+
Mutex.unlock();
355+
} else
356+
atomic_fetch_add(&ReleaseToOsSkips, 1U, memory_order_relaxed);
348357
}
349358
}
350359

@@ -488,7 +497,12 @@ class MapAllocatorCache {
488497
return true;
489498
}
490499

491-
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
500+
void releaseToOS() EXCLUDES(Mutex) {
501+
// Since this is a request to release everything, always wait for the
502+
// lock so that we guarantee all entries are released after this call.
503+
ScopedLock L(Mutex);
504+
releaseOlderThan(UINT64_MAX);
505+
}
492506

493507
void disableMemoryTagging() EXCLUDES(Mutex) {
494508
ScopedLock L(Mutex);
@@ -554,8 +568,7 @@ class MapAllocatorCache {
554568
Entry.Time = 0;
555569
}
556570

557-
void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
558-
ScopedLock L(Mutex);
571+
void releaseOlderThan(u64 Time) REQUIRES(Mutex) {
559572
if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
560573
return;
561574
OldestTime = 0;
@@ -573,6 +586,7 @@ class MapAllocatorCache {
573586
atomic_s32 ReleaseToOsIntervalMs = {};
574587
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
575588
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
589+
atomic_uptr ReleaseToOsSkips = {};
576590

577591
CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
578592
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>

0 commit comments

Comments
 (0)