[scudo] Make guard pages optional in the secondary #125960


Merged: 2 commits, Feb 7, 2025
9 changes: 9 additions & 0 deletions compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -31,6 +31,9 @@
#ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE
#define SECONDARY_REQUIRED_TEMPLATE_TYPE(...)
#endif
#ifndef SECONDARY_OPTIONAL
#define SECONDARY_OPTIONAL(...)
#endif
#ifndef SECONDARY_CACHE_OPTIONAL
#define SECONDARY_CACHE_OPTIONAL(...)
#endif
@@ -109,6 +112,11 @@ PRIMARY_OPTIONAL_TYPE(CompactPtrT, uptr)
// Defines the type of Secondary Cache to use.
SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT)

// SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Add one guard page at the front and back for each allocation.
SECONDARY_OPTIONAL(const bool, EnableGuardPages, true)

// SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Defines the type of cache used by the Secondary. Some additional
@@ -122,6 +130,7 @@ SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX)
SECONDARY_CACHE_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)

#undef SECONDARY_CACHE_OPTIONAL
#undef SECONDARY_OPTIONAL
#undef SECONDARY_REQUIRED_TEMPLATE_TYPE
#undef PRIMARY_OPTIONAL_TYPE
#undef PRIMARY_OPTIONAL
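
How a client config consumes the new knob: like the other `.def`-driven options, a config opts out of guard pages by declaring the flag inside its `Secondary` struct, and omitting it keeps the default of `true`. A minimal sketch (the config name is hypothetical, modeled on the test configs added later in this PR):

```cpp
// Hypothetical allocator config: a stock no-cache setup that opts out of
// the per-allocation guard pages. Omitting EnableGuardPages keeps true.
struct MyNoGuardConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
    // Saves two pages of address space per large allocation, at the cost
    // of losing guaranteed faults on small out-of-bounds accesses.
    static const bool EnableGuardPages = false;
  };
};
```
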
7 changes: 7 additions & 0 deletions compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -95,6 +95,13 @@ template <typename AllocatorConfig> struct SecondaryConfig {
#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) \
template <typename T> \
using NAME = typename AllocatorConfig::Secondary::template NAME<T>;

#define SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT) \
OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME) \
static constexpr removeConst<TYPE>::type get##NAME() { \
return NAME##State<typename AllocatorConfig::Secondary>::getValue(); \
}

#include "allocator_config.def"

struct CacheConfig {
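
For readers unfamiliar with the wrapper, `OPTIONAL_TEMPLATE` resolves to the config's value when the member is present and to the `.def` default otherwise. A rough stand-in for that machinery, using plain C++17 member detection rather than the actual scudo macros:

```cpp
#include <type_traits>

// Primary template: Config does not declare EnableGuardPages, use DEFAULT.
template <typename Config, typename = void>
struct EnableGuardPagesState {
  static constexpr bool getValue() { return true; } // the DEFAULT in the .def
};

// Specialization: Config declares EnableGuardPages, forward its value.
template <typename Config>
struct EnableGuardPagesState<
    Config, std::void_t<decltype(Config::EnableGuardPages)>> {
  static constexpr bool getValue() { return Config::EnableGuardPages; }
};

struct NoGuards { static const bool EnableGuardPages = false; };
struct Defaulted {};

static_assert(!EnableGuardPagesState<NoGuards>::getValue());
static_assert(EnableGuardPagesState<Defaulted>::getValue());
```
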
47 changes: 30 additions & 17 deletions compiler-rt/lib/scudo/standalone/secondary.h
@@ -614,6 +614,12 @@ template <typename Config> class MapAllocator {
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
}

static uptr getGuardPageSize() {
if (Config::getEnableGuardPages())
return getPageSizeCached();
return 0U;
}

static constexpr uptr getHeadersSize() {
return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
}
@@ -763,11 +769,11 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,

uptr RoundedSize =
roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
if (Alignment > PageSize)
if (UNLIKELY(Alignment > PageSize))
RoundedSize += Alignment - PageSize;

ReservedMemoryT ReservedMemory;
const uptr MapSize = RoundedSize + 2 * PageSize;
const uptr MapSize = RoundedSize + 2 * getGuardPageSize();
if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
MAP_ALLOWNOMEM))) {
return nullptr;
@@ -777,7 +783,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
ReservedMemory.getCapacity());
uptr MapBase = MemMap.getBase();
uptr CommitBase = MapBase + PageSize;
uptr CommitBase = MapBase + getGuardPageSize();
uptr MapEnd = MapBase + MapSize;

// In the unlikely event of alignments larger than a page, adjust the amount
@@ -786,32 +792,39 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
// For alignments greater than or equal to a page, the user pointer (eg:
// the pointer that is returned by the C or C++ allocation APIs) ends up
// on a page boundary, and our headers will live in the preceding page.
CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
CommitBase =
roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
// are less constrained memory wise, and that saves us two syscalls.
if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
MemMap.unmap(MapBase, NewMapBase - MapBase);
MapBase = NewMapBase;
}
const uptr NewMapEnd =
CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
MapEnd = NewMapEnd;
if (SCUDO_WORDSIZE == 32U) {
const uptr NewMapBase = CommitBase - getGuardPageSize();
DCHECK_GE(NewMapBase, MapBase);
if (NewMapBase != MapBase) {
MemMap.unmap(MapBase, NewMapBase - MapBase);
MapBase = NewMapBase;
}
// CommitBase is past the first guard page, but this computation needs
// to include a page where the header lives.
const uptr NewMapEnd =
CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
DCHECK_LE(NewMapEnd, MapEnd);
if (NewMapEnd != MapEnd) {
MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
MapEnd = NewMapEnd;
}
}
}

const uptr CommitSize = MapEnd - PageSize - CommitBase;
const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase;
const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
MemMap)) {
unmap(MemMap);
return nullptr;
}
const uptr HeaderPos = AllocPos - getHeadersSize();
// Make sure that the header is not in the guard page or before the base.
DCHECK_GE(HeaderPos, MapBase + getGuardPageSize());
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(HeaderPos));
if (useMemoryTagging<Config>(Options))
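
Net effect on the secondary layout: with guard pages enabled, a large allocation reserves `[guard page][committed block][guard page]`, and with them disabled both `getGuardPageSize()` terms collapse to zero so the block spans the whole reservation. A self-contained sketch of the size arithmetic for the common `Alignment <= PageSize` path (4 KiB page assumed; `HeadersSize` stands in for the Chunk plus LargeBlock header overhead):

```cpp
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr kPageSize = 4096; // assumed page size for the example
constexpr uptr roundUpTo(uptr X, uptr Boundary) {
  return (X + Boundary - 1) / Boundary * Boundary;
}

// Mirrors MapSize = RoundedSize + 2 * getGuardPageSize() from
// MapAllocator<Config>::allocate, ignoring the large-alignment adjustment.
constexpr uptr mapSize(uptr Size, uptr HeadersSize, bool EnableGuardPages) {
  const uptr GuardSize = EnableGuardPages ? kPageSize : 0;
  const uptr RoundedSize = roundUpTo(Size + HeadersSize, kPageSize);
  return RoundedSize + 2 * GuardSize; // one guard page at each end, if any
}

// A 1 MiB request pays one page for headers plus the two guard pages...
static_assert(mapSize(1 << 20, 48, true) == (1 << 20) + 3 * kPageSize);
// ...and only the header page when guard pages are disabled.
static_assert(mapSize(1 << 20, 48, false) == (1 << 20) + kPageSize);
```
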
6 changes: 4 additions & 2 deletions compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -54,16 +54,18 @@ void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
scudo::uptr Alignment) {
const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
Size = scudo::roundUp(Size, MinAlignment);
if (Allocator->useMemoryTaggingTestOnly())
if (Allocator->useMemoryTaggingTestOnly()) {
EXPECT_DEATH(
{
disableDebuggerdMaybe();
reinterpret_cast<char *>(P)[-1] = 'A';
},
"");
}
if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
? Allocator->useMemoryTaggingTestOnly()
: Alignment == MinAlignment) {
: Alignment == MinAlignment &&
AllocatorT::SecondaryT::getGuardPageSize() > 0) {
EXPECT_DEATH(
{
disableDebuggerdMaybe();
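
The added `getGuardPageSize() > 0` condition encodes that, for a secondary allocation, the out-of-bounds write in this death test is only guaranteed to fault when a guard page follows the block; with `EnableGuardPages` off, the byte past the end may be ordinary committed memory. An illustrative restatement of the predicate (hypothetical helper, not part of the PR):

```cpp
// When should a write one byte past the allocation be expected to crash?
bool expectOverflowCrash(bool IsPrimaryAllocation, bool UseMemoryTagging,
                         bool IsMinAligned, scudo::uptr GuardPageSize) {
  if (IsPrimaryAllocation)
    return UseMemoryTagging; // only memory tagging catches it in the primary
  return IsMinAligned && GuardPageSize > 0; // secondary needs a guard page
}
```
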
116 changes: 110 additions & 6 deletions compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -80,6 +80,19 @@ struct TestNoCacheConfig {
};
};

struct TestNoCacheNoGuardPageConfig {
static const bool MaySupportMemoryTagging = false;
template <typename> using TSDRegistryT = void;
template <typename> using PrimaryT = void;
template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

struct Secondary {
template <typename Config>
using CacheT = scudo::MapAllocatorNoCache<Config>;
static const bool EnableGuardPages = false;
};
};

struct TestCacheConfig {
static const bool MaySupportMemoryTagging = false;
template <typename> using TSDRegistryT = void;
@@ -100,6 +113,27 @@ struct TestCacheConfig {
};
};

struct TestCacheNoGuardPageConfig {
static const bool MaySupportMemoryTagging = false;
template <typename> using TSDRegistryT = void;
template <typename> using PrimaryT = void;
template <typename> using SecondaryT = void;

struct Secondary {
struct Cache {
static const scudo::u32 EntriesArraySize = 128U;
static const scudo::u32 QuarantineSize = 0U;
static const scudo::u32 DefaultMaxEntriesCount = 64U;
static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
};

template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
static const bool EnableGuardPages = false;
};
};

template <typename Config> static void testBasic() {
using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
AllocatorInfoType<Config> Info;
@@ -146,15 +180,17 @@ template <typename Config> static void testBasic() {

TEST(ScudoSecondaryTest, Basic) {
testBasic<TestNoCacheConfig>();
testBasic<TestNoCacheNoGuardPageConfig>();
testBasic<TestCacheConfig>();
testBasic<TestCacheNoGuardPageConfig>();
testBasic<scudo::DefaultConfig>();
}

// This exercises a variety of combinations of size and alignment for the
// MapAllocator. The size computation done here mimic the ones done by the
// combined allocator.
TEST(ScudoSecondaryTest, AllocatorCombinations) {
AllocatorInfoType<TestNoCacheConfig> Info;
template <typename Config> void testAllocatorCombinations() {
AllocatorInfoType<Config> Info;

constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
@@ -180,8 +216,13 @@ TEST(ScudoSecondaryTest, AllocatorCombinations) {
}
}

TEST(ScudoSecondaryTest, AllocatorIterate) {
AllocatorInfoType<TestNoCacheConfig> Info;
TEST(ScudoSecondaryTest, AllocatorCombinations) {
testAllocatorCombinations<TestNoCacheConfig>();
testAllocatorCombinations<TestNoCacheNoGuardPageConfig>();
}

template <typename Config> void testAllocatorIterate() {
AllocatorInfoType<Config> Info;

std::vector<void *> V;
for (scudo::uptr I = 0; I < 32U; I++)
@@ -201,8 +242,13 @@ TEST(ScudoSecondaryTest, AllocatorIterate) {
}
}

TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
AllocatorInfoType<TestNoCacheConfig> Info(/*ReleaseToOsInterval=*/0);
TEST(ScudoSecondaryTest, AllocatorIterate) {
testAllocatorIterate<TestNoCacheConfig>();
testAllocatorIterate<TestNoCacheNoGuardPageConfig>();
}

template <typename Config> void testAllocatorWithReleaseThreadsRace() {
AllocatorInfoType<Config> Info(/*ReleaseToOsInterval=*/0);

std::mutex Mutex;
std::condition_variable Cv;
@@ -243,6 +289,64 @@ TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
T.join();
}

TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
testAllocatorWithReleaseThreadsRace<TestNoCacheConfig>();
testAllocatorWithReleaseThreadsRace<TestNoCacheNoGuardPageConfig>();
}

template <typename Config>
void testGetMappedSize(scudo::uptr Size, scudo::uptr *mapped,
scudo::uptr *guard_page_size) {
AllocatorInfoType<Config> Info;

scudo::uptr Stats[scudo::StatCount] = {};
Info.GlobalStats.get(Stats);
*mapped = Stats[scudo::StatMapped];
Stats[scudo::StatMapped] = 0;

// Make sure the allocation is aligned to a page boundary so that the checks
// in the tests can avoid problems due to allocations having different
// alignments.
void *Ptr = Info.Allocator->allocate(Info.Options, Size, PageSize);
EXPECT_NE(Ptr, nullptr);

Info.GlobalStats.get(Stats);
EXPECT_GE(Stats[scudo::StatMapped], *mapped);
*mapped = Stats[scudo::StatMapped] - *mapped;

Info.Allocator->deallocate(Info.Options, Ptr);

*guard_page_size = Info.Allocator->getGuardPageSize();
}

TEST(ScudoSecondaryTest, VerifyGuardPageOption) {
static scudo::uptr AllocSize = 1000 * PageSize;

// Verify that a config with guard pages enabled:
// - Non-zero sized guard page
// - Mapped in at least the size of the allocation plus 2 * guard page size
scudo::uptr guard_mapped = 0;
scudo::uptr guard_page_size = 0;
testGetMappedSize<TestNoCacheConfig>(AllocSize, &guard_mapped,
&guard_page_size);
EXPECT_GT(guard_page_size, 0U);
EXPECT_GE(guard_mapped, AllocSize + 2 * guard_page_size);

// Verify that a config with guard pages disabled:
// - Zero sized guard page
// - The total mapped in is greater than the allocation size
scudo::uptr no_guard_mapped = 0;
scudo::uptr no_guard_page_size = 0;
testGetMappedSize<TestNoCacheNoGuardPageConfig>(AllocSize, &no_guard_mapped,
&no_guard_page_size);
EXPECT_EQ(no_guard_page_size, 0U);
EXPECT_GE(no_guard_mapped, AllocSize);

// Verify that the guard page config maps in at least two guard pages more
// than the no guard page config.
EXPECT_GE(guard_mapped, no_guard_mapped + guard_page_size * 2);
}
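
With an assumed 4 KiB page size, the bounds this test pins down work out as follows (illustrative arithmetic only; the real values come from the platform's page size and the allocator's stats):

```cpp
#include <cstdint>

// Illustrative accounting for VerifyGuardPageOption, assuming 4 KiB pages.
constexpr uint64_t kPage = 4096;
constexpr uint64_t kAllocSize = 1000 * kPage; // the test's AllocSize
// The guard-page config reserves at least one extra page on each side,
constexpr uint64_t kMinGuardMapped = kAllocSize + 2 * kPage;
// so the two configs must differ by at least two pages of mapped space.
static_assert(kMinGuardMapped - kAllocSize == 2 * kPage);
```
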

// Value written to cache entries that are unmapped.
static scudo::u32 UnmappedMarker = 0xDEADBEEF;
