Skip to content

Commit 373d35d

Browse files
authored
[scudo] Added test fixture for cache tests. (llvm#102230)
The test fixture simplifies some of the allocation logic, and mmap-based allocations are performed outside the cache so that the cache can be tested more directly. Additionally, a couple of end-to-end tests for the cache and the LRU algorithm are added.
1 parent 42b5540 commit 373d35d

File tree

2 files changed

+108
-5
lines changed

2 files changed

+108
-5
lines changed

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,11 @@ template <typename T> class NonZeroLengthArray<T, 0> {
178178
T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
179179
};
180180

181-
template <typename Config> class MapAllocatorCache {
181+
// The default unmap callback is simply scudo::unmap.
182+
// In testing, a different unmap callback is used to
183+
// record information about unmaps in the cache
184+
template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
185+
class MapAllocatorCache {
182186
public:
183187
void getStats(ScopedString *Str) {
184188
ScopedLock L(Mutex);
@@ -246,6 +250,7 @@ template <typename Config> class MapAllocatorCache {
246250
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
247251
u64 Time;
248252
CachedBlock Entry;
253+
249254
Entry.CommitBase = CommitBase;
250255
Entry.CommitSize = CommitSize;
251256
Entry.BlockBegin = BlockBegin;
@@ -290,7 +295,7 @@ template <typename Config> class MapAllocatorCache {
290295
// read Options and when we locked Mutex. We can't insert our entry into
291296
// the quarantine or the cache because the permissions would be wrong so
292297
// just unmap it.
293-
unmap(Entry.MemMap);
298+
unmapCallBack(Entry.MemMap);
294299
break;
295300
}
296301
if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
@@ -321,7 +326,7 @@ template <typename Config> class MapAllocatorCache {
321326
} while (0);
322327

323328
for (MemMapT &EvictMemMap : EvictionMemMaps)
324-
unmap(EvictMemMap);
329+
unmapCallBack(EvictMemMap);
325330

326331
if (Interval >= 0) {
327332
// TODO: Add ReleaseToOS logic to LRU algorithm
@@ -423,7 +428,7 @@ template <typename Config> class MapAllocatorCache {
423428
for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
424429
if (Quarantine[I].isValid()) {
425430
MemMapT &MemMap = Quarantine[I].MemMap;
426-
unmap(MemMap);
431+
unmapCallBack(MemMap);
427432
Quarantine[I].invalidate();
428433
}
429434
}
@@ -517,7 +522,7 @@ template <typename Config> class MapAllocatorCache {
517522
}
518523
for (uptr I = 0; I < N; I++) {
519524
MemMapT &MemMap = MapInfo[I];
520-
unmap(MemMap);
525+
unmapCallBack(MemMap);
521526
}
522527
}
523528

compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -265,3 +265,101 @@ TEST_F(MapAllocatorWithReleaseTest, SecondaryThreadsRace) {
265265
Allocator->getStats(&Str);
266266
Str.output();
267267
}
268+
269+
// Fixture for direct MapAllocatorCache tests. Mappings are created outside
// the cache (see allocate()) so the cache can be exercised on its own, and a
// test unmap callback marks mappings instead of actually unmapping them, so
// tests can observe which entries the cache released.
struct MapAllocatorCacheTest : public Test {
  static constexpr scudo::u32 UnmappedMarker = 0xDEADBEEF;

  // Unmap callback injected into MapAllocatorCache: instead of unmapping,
  // write UnmappedMarker at the mapping's base so a test can later detect
  // that the cache invoked the callback on this mapping.
  static void testUnmapCallback(scudo::MemMapT &MemMap) {
    scudo::u32 *Ptr = reinterpret_cast<scudo::u32 *>(MemMap.getBase());
    *Ptr = UnmappedMarker;
  }

  using SecondaryConfig = scudo::SecondaryConfig<TestConfig>;
  using CacheConfig = SecondaryConfig::CacheConfig;
  using CacheT = scudo::MapAllocatorCache<CacheConfig, testUnmapCallback>;

  std::unique_ptr<CacheT> Cache = std::make_unique<CacheT>();

  const scudo::uptr PageSize = scudo::getPageSizeCached();
  // The current test allocation size is set to the minimum size
  // needed for the scudo allocator to fall back to the secondary allocator
  static constexpr scudo::uptr TestAllocSize =
      CacheConfig::getDefaultMaxEntrySize();

  scudo::Options Options = getOptionsForConfig<SecondaryConfig>();

  // ReleaseToOsInterval of -1 disables interval-based release during tests.
  void SetUp() override { Cache->init(/*ReleaseToOsInterval=*/-1); }

  void TearDown() override { Cache->unmapTestOnly(); }

  // Reserve, dispatch, and remap a mapping of at least Size bytes (rounded
  // up to page size), independently of the cache under test. The caller owns
  // the returned mapping and is responsible for unmapping it.
  scudo::MemMapT allocate(scudo::uptr Size) {
    scudo::uptr MapSize = scudo::roundUp(Size, PageSize);
    scudo::ReservedMemoryT ReservedMemory;
    CHECK(ReservedMemory.create(0U, MapSize, nullptr, MAP_ALLOWNOMEM));

    scudo::MemMapT MemMap = ReservedMemory.dispatch(
        ReservedMemory.getBase(), ReservedMemory.getCapacity());
    MemMap.remap(MemMap.getBase(), MemMap.getCapacity(), "scudo:test",
                 MAP_RESIZABLE | MAP_ALLOWNOMEM);
    return MemMap;
  }

  // Allocate NumEntries mappings of Size bytes each and store every one in
  // the cache. The mappings are appended to MemMaps (in insertion order) so
  // the test can verify retrieval order and clean them up afterwards.
  void fillCacheWithSameSizeBlocks(std::vector<scudo::MemMapT> &MemMaps,
                                   scudo::uptr NumEntries, scudo::uptr Size) {
    for (scudo::uptr I = 0; I < NumEntries; I++) {
      MemMaps.emplace_back(allocate(Size));
      auto &MemMap = MemMaps[I];
      Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
                   MemMap.getBase(), MemMap);
    }
  }
};
317+
318+
// Entries must come back out of the cache in the reverse of insertion order:
// the most-recently-stored block is retrieved first (LRU behavior).
TEST_F(MapAllocatorCacheTest, CacheOrder) {
  std::vector<scudo::MemMapT> MemMaps;
  Cache->setOption(scudo::Option::MaxCacheEntriesCount,
                   CacheConfig::getEntriesArraySize());

  const scudo::uptr NumEntries = CacheConfig::getEntriesArraySize();
  fillCacheWithSameSizeBlocks(MemMaps, NumEntries, TestAllocSize);

  // The K-th retrieval must return the K-th newest insertion.
  for (scudo::uptr Retrieved = 0; Retrieved < NumEntries; Retrieved++) {
    scudo::uptr EntryHeaderPos;
    scudo::CachedBlock Entry =
        Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos);
    EXPECT_EQ(Entry.MemMap.getBase(),
              MemMaps[NumEntries - Retrieved - 1].getBase());
  }

  // Release the backing mappings now that the cache no longer owns them.
  for (auto &MemMap : MemMaps)
    MemMap.unmap();
}
338+
339+
// Overfill the cache by one entry so an eviction is forced, then verify via
// the test unmap callback that the evicted mapping was the oldest one stored
// (i.e. the cache released it rather than leaking it).
TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
  std::vector<scudo::MemMapT> MemMaps;
  // Storing MaxEntriesCount + 1 blocks pushes the cache past its limit; the
  // first (oldest) entry should be the one evicted.
  const scudo::uptr CachedCount = CacheConfig::getDefaultMaxEntriesCount();
  fillCacheWithSameSizeBlocks(MemMaps, CachedCount + 1, TestAllocSize);

  std::vector<scudo::CachedBlock> RetrievedEntries;

  // Drain the cache. Blocks come back newest-first, so the K-th retrieval
  // corresponds to insertion index CachedCount - K; MemMaps[0] never comes
  // back because it was evicted.
  for (scudo::uptr Retrieved = 0; Retrieved < CachedCount; Retrieved++) {
    scudo::uptr EntryHeaderPos;
    RetrievedEntries.push_back(
        Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos));
    EXPECT_EQ(MemMaps[CachedCount - Retrieved].getBase(),
              RetrievedEntries.back().MemMap.getBase());
  }

  // The evicted entry must carry the marker written by testUnmapCallback.
  EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(MemMaps[0].getBase()),
            UnmappedMarker);

  // Release the backing mappings.
  for (auto &MemMap : MemMaps)
    MemMap.unmap();
}

0 commit comments

Comments
 (0)