Skip to content

Commit 60e07c6

Browse files
authored
Revert "[scudo] Add partial chunk heuristic to retrieval algorithm. (#104807)"
This reverts commit 27dc247.
1 parent a9ce181 commit 60e07c6

File tree

2 files changed

+34
-123
lines changed

2 files changed

+34
-123
lines changed

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 30 additions & 91 deletions
Original file line numberDiff line numberDiff line change
@@ -72,14 +72,6 @@ namespace {
7272
struct CachedBlock {
7373
static constexpr u16 CacheIndexMax = UINT16_MAX;
7474
static constexpr u16 InvalidEntry = CacheIndexMax;
75-
// * ReleaseMemoryUpperBound default is currently 16 KB
76-
// - We arrived at this value after noticing that mapping
77-
// in larger memory regions performs better than releasing
78-
// memory and forcing a cache hit. According to the data,
79-
// it suggests that beyond 16KB, the release execution time is
80-
// longer than the map execution time. In this way, the default
81-
// is dependent on the platform.
82-
static constexpr uptr ReleaseMemoryUpperBound = 1 << 14;
8375

8476
uptr CommitBase = 0;
8577
uptr CommitSize = 0;
@@ -98,9 +90,8 @@ struct CachedBlock {
9890
template <typename Config> class MapAllocatorNoCache {
9991
public:
10092
void init(UNUSED s32 ReleaseToOsInterval) {}
101-
CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size,
102-
UNUSED uptr Alignment, UNUSED uptr HeadersSize,
103-
UNUSED uptr &EntryHeaderPos) {
93+
CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
94+
UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
10495
return {};
10596
}
10697
void store(UNUSED Options Options, UNUSED uptr CommitBase,
@@ -343,103 +334,61 @@ class MapAllocatorCache {
343334
}
344335
}
345336

346-
CachedBlock retrieve(uptr MaxAllowedFragmentedBytes, uptr Size,
347-
uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
348-
EXCLUDES(Mutex) {
337+
CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
338+
uptr &EntryHeaderPos) EXCLUDES(Mutex) {
349339
const uptr PageSize = getPageSizeCached();
350340
// 10% of the requested size proved to be the optimal choice for
351341
// retrieving cached blocks after testing several options.
352342
constexpr u32 FragmentedBytesDivisor = 10;
353-
bool FoundOptimalFit = false;
343+
bool Found = false;
354344
CachedBlock Entry;
355345
EntryHeaderPos = 0;
356346
{
357347
ScopedLock L(Mutex);
358348
CallsToRetrieve++;
359349
if (EntriesCount == 0)
360350
return {};
361-
u16 RetrievedIndex = CachedBlock::InvalidEntry;
351+
u32 OptimalFitIndex = 0;
362352
uptr MinDiff = UINTPTR_MAX;
363-
364-
// Since allocation sizes don't always match cached memory chunk sizes
365-
// we allow some memory to be unused (called fragmented bytes). The
366-
// amount of unused bytes is exactly EntryHeaderPos - CommitBase.
367-
//
368-
// CommitBase CommitBase + CommitSize
369-
// V V
370-
// +---+------------+-----------------+---+
371-
// | | | | |
372-
// +---+------------+-----------------+---+
373-
// ^ ^ ^
374-
// Guard EntryHeaderPos Guard-page-end
375-
// page-begin
376-
//
377-
// [EntryHeaderPos, CommitBase + CommitSize) contains the user data as
378-
// well as the header metadata. If EntryHeaderPos - CommitBase exceeds
379-
// MaxAllowedFragmentedBytes, the cached memory chunk is not considered
380-
// valid for retrieval.
381-
for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
353+
for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
382354
I = Entries[I].Next) {
383355
const uptr CommitBase = Entries[I].CommitBase;
384356
const uptr CommitSize = Entries[I].CommitSize;
385357
const uptr AllocPos =
386358
roundDown(CommitBase + CommitSize - Size, Alignment);
387359
const uptr HeaderPos = AllocPos - HeadersSize;
388-
if (HeaderPos > CommitBase + CommitSize || HeaderPos < CommitBase)
360+
if (HeaderPos > CommitBase + CommitSize)
389361
continue;
390-
391-
const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;
392-
393-
if (Diff > MaxAllowedFragmentedBytes || Diff >= MinDiff)
362+
if (HeaderPos < CommitBase ||
363+
AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
394364
continue;
395-
396-
MinDiff = Diff;
397-
RetrievedIndex = I;
398-
EntryHeaderPos = HeaderPos;
399-
400-
const uptr OptimalFitThesholdBytes =
365+
}
366+
Found = true;
367+
const uptr Diff = HeaderPos - CommitBase;
368+
// immediately use a cached block if its size is close enough to the
369+
// requested size.
370+
const uptr MaxAllowedFragmentedBytes =
401371
(CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
402-
if (Diff <= OptimalFitThesholdBytes) {
403-
FoundOptimalFit = true;
372+
if (Diff <= MaxAllowedFragmentedBytes) {
373+
OptimalFitIndex = I;
374+
EntryHeaderPos = HeaderPos;
404375
break;
405376
}
377+
// keep track of the smallest cached block
378+
// that is greater than (AllocSize + HeaderSize)
379+
if (Diff > MinDiff)
380+
continue;
381+
OptimalFitIndex = I;
382+
MinDiff = Diff;
383+
EntryHeaderPos = HeaderPos;
406384
}
407-
if (RetrievedIndex != CachedBlock::InvalidEntry) {
408-
Entry = Entries[RetrievedIndex];
409-
remove(RetrievedIndex);
385+
if (Found) {
386+
Entry = Entries[OptimalFitIndex];
387+
remove(OptimalFitIndex);
410388
SuccessfulRetrieves++;
411389
}
412390
}
413391

414-
// The difference between the retrieved memory chunk and the request
415-
// size is at most MaxAllowedFragmentedBytes
416-
//
417-
// / MaxAllowedFragmentedBytes \
418-
// +--------------------------+-----------+
419-
// | | |
420-
// +--------------------------+-----------+
421-
// \ Bytes to be released / ^
422-
// |
423-
// (may or may not be commited)
424-
//
425-
// The maximum number of bytes released to the OS is capped by
426-
// ReleaseMemoryUpperBound
427-
//
428-
// TODO : Consider making ReleaseMemoryUpperBound configurable since
429-
// the release to OS API can vary across systems.
430-
if (!FoundOptimalFit && Entry.Time != 0) {
431-
const uptr FragmentedBytes =
432-
roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
433-
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
434-
if (FragmentedBytes > MaxUnusedCacheBytes) {
435-
uptr BytesToRelease =
436-
roundUp(Min<uptr>(CachedBlock::ReleaseMemoryUpperBound,
437-
FragmentedBytes - MaxUnusedCacheBytes),
438-
PageSize);
439-
Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
440-
}
441-
}
442-
443392
return Entry;
444393
}
445394

@@ -710,18 +659,8 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
710659
FillContentsMode FillContents) {
711660
CachedBlock Entry;
712661
uptr EntryHeaderPos;
713-
uptr MaxAllowedFragmentedBytes;
714-
const uptr PageSize = getPageSizeCached();
715-
716-
if (LIKELY(!useMemoryTagging<Config>(Options))) {
717-
MaxAllowedFragmentedBytes =
718-
MaxUnusedCachePages * PageSize + CachedBlock::ReleaseMemoryUpperBound;
719-
} else {
720-
MaxAllowedFragmentedBytes = MaxUnusedCachePages * PageSize;
721-
}
722662

723-
Entry = Cache.retrieve(MaxAllowedFragmentedBytes, Size, Alignment,
724-
getHeadersSize(), EntryHeaderPos);
663+
Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
725664
if (!Entry.isValid())
726665
return nullptr;
727666

compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp

Lines changed: 4 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -281,8 +281,8 @@ struct MapAllocatorCacheTest : public Test {
281281
std::unique_ptr<CacheT> Cache = std::make_unique<CacheT>();
282282

283283
const scudo::uptr PageSize = scudo::getPageSizeCached();
284-
// The current test allocation size is set to the maximum
285-
// cache entry size
284+
// The current test allocation size is set to the minimum size
285+
// needed for the scudo allocator to fall back to the secondary allocator
286286
static constexpr scudo::uptr TestAllocSize =
287287
CacheConfig::getDefaultMaxEntrySize();
288288

@@ -327,8 +327,7 @@ TEST_F(MapAllocatorCacheTest, CacheOrder) {
327327
for (scudo::uptr I = CacheConfig::getEntriesArraySize(); I > 0; I--) {
328328
scudo::uptr EntryHeaderPos;
329329
scudo::CachedBlock Entry =
330-
Cache->retrieve(scudo::MaxUnusedCachePages * PageSize, TestAllocSize,
331-
PageSize, 0, EntryHeaderPos);
330+
Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos);
332331
EXPECT_EQ(Entry.MemMap.getBase(), MemMaps[I - 1].getBase());
333332
}
334333

@@ -337,32 +336,6 @@ TEST_F(MapAllocatorCacheTest, CacheOrder) {
337336
MemMap.unmap();
338337
}
339338

340-
TEST_F(MapAllocatorCacheTest, PartialChunkHeuristicRetrievalTest) {
341-
const scudo::uptr MaxUnusedCacheBytes = PageSize;
342-
const scudo::uptr FragmentedBytes =
343-
MaxUnusedCacheBytes + scudo::CachedBlock::ReleaseMemoryUpperBound;
344-
345-
scudo::uptr EntryHeaderPos;
346-
scudo::CachedBlock Entry;
347-
scudo::MemMapT MemMap = allocate(PageSize + FragmentedBytes);
348-
Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
349-
MemMap.getBase(), MemMap);
350-
351-
// FragmentedBytes > MaxAllowedFragmentedBytes so PageSize
352-
// cannot be retrieved from the cache
353-
Entry = Cache->retrieve(/*MaxAllowedFragmentedBytes=*/0, PageSize, PageSize,
354-
0, EntryHeaderPos);
355-
EXPECT_FALSE(Entry.isValid());
356-
357-
// FragmentedBytes <= MaxAllowedFragmentedBytes so PageSize
358-
// can be retrieved from the cache
359-
Entry =
360-
Cache->retrieve(FragmentedBytes, PageSize, PageSize, 0, EntryHeaderPos);
361-
EXPECT_TRUE(Entry.isValid());
362-
363-
MemMap.unmap();
364-
}
365-
366339
TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
367340
std::vector<scudo::MemMapT> MemMaps;
368341
// Fill the cache above MaxEntriesCount to force an eviction
@@ -378,8 +351,7 @@ TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
378351
for (scudo::uptr I = CacheConfig::getDefaultMaxEntriesCount(); I > 0; I--) {
379352
scudo::uptr EntryHeaderPos;
380353
RetrievedEntries.push_back(
381-
Cache->retrieve(scudo::MaxUnusedCachePages * PageSize, TestAllocSize,
382-
PageSize, 0, EntryHeaderPos));
354+
Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos));
383355
EXPECT_EQ(MemMaps[I].getBase(), RetrievedEntries.back().MemMap.getBase());
384356
}
385357

0 commit comments

Comments (0)