Skip to content

Commit 3d35246

Browse files
authored
[scudo] Make guard pages optional in the secondary (#125960)
Add an optional flag for the secondary allocator called `EnableGuardPages` to enable/disable the use of guard pages. By default, this option is enabled.
1 parent 7f7605d commit 3d35246

File tree

5 files changed

+160
-25
lines changed

5 files changed

+160
-25
lines changed

compiler-rt/lib/scudo/standalone/allocator_config.def

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,9 @@
3131
#ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE
3232
#define SECONDARY_REQUIRED_TEMPLATE_TYPE(...)
3333
#endif
34+
#ifndef SECONDARY_OPTIONAL
35+
#define SECONDARY_OPTIONAL(...)
36+
#endif
3437
#ifndef SECONDARY_CACHE_OPTIONAL
3538
#define SECONDARY_CACHE_OPTIONAL(...)
3639
#endif
@@ -109,6 +112,11 @@ PRIMARY_OPTIONAL_TYPE(CompactPtrT, uptr)
109112
// Defines the type of Secondary Cache to use.
110113
SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT)
111114

115+
// SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT)
116+
//
117+
// Add one guard page at the front and back for each allocation.
118+
SECONDARY_OPTIONAL(const bool, EnableGuardPages, true)
119+
112120
// SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)
113121
//
114122
// Defines the type of cache used by the Secondary. Some additional
@@ -122,6 +130,7 @@ SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX)
122130
SECONDARY_CACHE_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)
123131

124132
#undef SECONDARY_CACHE_OPTIONAL
133+
#undef SECONDARY_OPTIONAL
125134
#undef SECONDARY_REQUIRED_TEMPLATE_TYPE
126135
#undef PRIMARY_OPTIONAL_TYPE
127136
#undef PRIMARY_OPTIONAL

compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,13 @@ template <typename AllocatorConfig> struct SecondaryConfig {
9595
#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) \
9696
template <typename T> \
9797
using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
98+
99+
#define SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT) \
100+
OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME) \
101+
static constexpr removeConst<TYPE>::type get##NAME() { \
102+
return NAME##State<typename AllocatorConfig::Secondary>::getValue(); \
103+
}
104+
98105
#include "allocator_config.def"
99106

100107
struct CacheConfig {

compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 30 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -614,6 +614,12 @@ template <typename Config> class MapAllocator {
614614
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
615615
}
616616

617+
static uptr getGuardPageSize() {
618+
if (Config::getEnableGuardPages())
619+
return getPageSizeCached();
620+
return 0U;
621+
}
622+
617623
static constexpr uptr getHeadersSize() {
618624
return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
619625
}
@@ -763,11 +769,11 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
763769

764770
uptr RoundedSize =
765771
roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
766-
if (Alignment > PageSize)
772+
if (UNLIKELY(Alignment > PageSize))
767773
RoundedSize += Alignment - PageSize;
768774

769775
ReservedMemoryT ReservedMemory;
770-
const uptr MapSize = RoundedSize + 2 * PageSize;
776+
const uptr MapSize = RoundedSize + 2 * getGuardPageSize();
771777
if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
772778
MAP_ALLOWNOMEM))) {
773779
return nullptr;
@@ -777,7 +783,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
777783
MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
778784
ReservedMemory.getCapacity());
779785
uptr MapBase = MemMap.getBase();
780-
uptr CommitBase = MapBase + PageSize;
786+
uptr CommitBase = MapBase + getGuardPageSize();
781787
uptr MapEnd = MapBase + MapSize;
782788

783789
// In the unlikely event of alignments larger than a page, adjust the amount
@@ -786,32 +792,39 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
786792
// For alignments greater than or equal to a page, the user pointer (eg:
787793
// the pointer that is returned by the C or C++ allocation APIs) ends up
788794
// on a page boundary, and our headers will live in the preceding page.
789-
CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
790-
const uptr NewMapBase = CommitBase - PageSize;
791-
DCHECK_GE(NewMapBase, MapBase);
795+
CommitBase =
796+
roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
792797
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
793798
// are less constrained memory wise, and that saves us two syscalls.
794-
if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
795-
MemMap.unmap(MapBase, NewMapBase - MapBase);
796-
MapBase = NewMapBase;
797-
}
798-
const uptr NewMapEnd =
799-
CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
800-
DCHECK_LE(NewMapEnd, MapEnd);
801-
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
802-
MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
803-
MapEnd = NewMapEnd;
799+
if (SCUDO_WORDSIZE == 32U) {
800+
const uptr NewMapBase = CommitBase - getGuardPageSize();
801+
DCHECK_GE(NewMapBase, MapBase);
802+
if (NewMapBase != MapBase) {
803+
MemMap.unmap(MapBase, NewMapBase - MapBase);
804+
MapBase = NewMapBase;
805+
}
806+
// CommitBase is past the first guard page, but this computation needs
807+
// to include a page where the header lives.
808+
const uptr NewMapEnd =
809+
CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
810+
DCHECK_LE(NewMapEnd, MapEnd);
811+
if (NewMapEnd != MapEnd) {
812+
MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
813+
MapEnd = NewMapEnd;
814+
}
804815
}
805816
}
806817

807-
const uptr CommitSize = MapEnd - PageSize - CommitBase;
818+
const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase;
808819
const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
809820
if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
810821
MemMap)) {
811822
unmap(MemMap);
812823
return nullptr;
813824
}
814825
const uptr HeaderPos = AllocPos - getHeadersSize();
826+
// Make sure that the header is not in the guard page or before the base.
827+
DCHECK_GE(HeaderPos, MapBase + getGuardPageSize());
815828
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
816829
LargeBlock::addHeaderTag<Config>(HeaderPos));
817830
if (useMemoryTagging<Config>(Options))

compiler-rt/lib/scudo/standalone/tests/combined_test.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,16 +54,18 @@ void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
5454
scudo::uptr Alignment) {
5555
const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
5656
Size = scudo::roundUp(Size, MinAlignment);
57-
if (Allocator->useMemoryTaggingTestOnly())
57+
if (Allocator->useMemoryTaggingTestOnly()) {
5858
EXPECT_DEATH(
5959
{
6060
disableDebuggerdMaybe();
6161
reinterpret_cast<char *>(P)[-1] = 'A';
6262
},
6363
"");
64+
}
6465
if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
6566
? Allocator->useMemoryTaggingTestOnly()
66-
: Alignment == MinAlignment) {
67+
: Alignment == MinAlignment &&
68+
AllocatorT::SecondaryT::getGuardPageSize() > 0) {
6769
EXPECT_DEATH(
6870
{
6971
disableDebuggerdMaybe();

compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp

Lines changed: 110 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,19 @@ struct TestNoCacheConfig {
8080
};
8181
};
8282

83+
struct TestNoCacheNoGuardPageConfig {
84+
static const bool MaySupportMemoryTagging = false;
85+
template <typename> using TSDRegistryT = void;
86+
template <typename> using PrimaryT = void;
87+
template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
88+
89+
struct Secondary {
90+
template <typename Config>
91+
using CacheT = scudo::MapAllocatorNoCache<Config>;
92+
static const bool EnableGuardPages = false;
93+
};
94+
};
95+
8396
struct TestCacheConfig {
8497
static const bool MaySupportMemoryTagging = false;
8598
template <typename> using TSDRegistryT = void;
@@ -100,6 +113,27 @@ struct TestCacheConfig {
100113
};
101114
};
102115

116+
struct TestCacheNoGuardPageConfig {
117+
static const bool MaySupportMemoryTagging = false;
118+
template <typename> using TSDRegistryT = void;
119+
template <typename> using PrimaryT = void;
120+
template <typename> using SecondaryT = void;
121+
122+
struct Secondary {
123+
struct Cache {
124+
static const scudo::u32 EntriesArraySize = 128U;
125+
static const scudo::u32 QuarantineSize = 0U;
126+
static const scudo::u32 DefaultMaxEntriesCount = 64U;
127+
static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
128+
static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
129+
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
130+
};
131+
132+
template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
133+
static const bool EnableGuardPages = false;
134+
};
135+
};
136+
103137
template <typename Config> static void testBasic() {
104138
using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
105139
AllocatorInfoType<Config> Info;
@@ -146,15 +180,17 @@ template <typename Config> static void testBasic() {
146180

147181
TEST(ScudoSecondaryTest, Basic) {
148182
testBasic<TestNoCacheConfig>();
183+
testBasic<TestNoCacheNoGuardPageConfig>();
149184
testBasic<TestCacheConfig>();
185+
testBasic<TestCacheNoGuardPageConfig>();
150186
testBasic<scudo::DefaultConfig>();
151187
}
152188

153189
// This exercises a variety of combinations of size and alignment for the
154190
// MapAllocator. The size computation done here mimics the ones done by the
155191
// combined allocator.
156-
TEST(ScudoSecondaryTest, AllocatorCombinations) {
157-
AllocatorInfoType<TestNoCacheConfig> Info;
192+
template <typename Config> void testAllocatorCombinations() {
193+
AllocatorInfoType<Config> Info;
158194

159195
constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
160196
constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
@@ -180,8 +216,13 @@ TEST(ScudoSecondaryTest, AllocatorCombinations) {
180216
}
181217
}
182218

183-
TEST(ScudoSecondaryTest, AllocatorIterate) {
184-
AllocatorInfoType<TestNoCacheConfig> Info;
219+
TEST(ScudoSecondaryTest, AllocatorCombinations) {
220+
testAllocatorCombinations<TestNoCacheConfig>();
221+
testAllocatorCombinations<TestNoCacheNoGuardPageConfig>();
222+
}
223+
224+
template <typename Config> void testAllocatorIterate() {
225+
AllocatorInfoType<Config> Info;
185226

186227
std::vector<void *> V;
187228
for (scudo::uptr I = 0; I < 32U; I++)
@@ -201,8 +242,13 @@ TEST(ScudoSecondaryTest, AllocatorIterate) {
201242
}
202243
}
203244

204-
TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
205-
AllocatorInfoType<TestNoCacheConfig> Info(/*ReleaseToOsInterval=*/0);
245+
TEST(ScudoSecondaryTest, AllocatorIterate) {
246+
testAllocatorIterate<TestNoCacheConfig>();
247+
testAllocatorIterate<TestNoCacheNoGuardPageConfig>();
248+
}
249+
250+
template <typename Config> void testAllocatorWithReleaseThreadsRace() {
251+
AllocatorInfoType<Config> Info(/*ReleaseToOsInterval=*/0);
206252

207253
std::mutex Mutex;
208254
std::condition_variable Cv;
@@ -243,6 +289,64 @@ TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
243289
T.join();
244290
}
245291

292+
TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
293+
testAllocatorWithReleaseThreadsRace<TestNoCacheConfig>();
294+
testAllocatorWithReleaseThreadsRace<TestNoCacheNoGuardPageConfig>();
295+
}
296+
297+
template <typename Config>
298+
void testGetMappedSize(scudo::uptr Size, scudo::uptr *mapped,
299+
scudo::uptr *guard_page_size) {
300+
AllocatorInfoType<Config> Info;
301+
302+
scudo::uptr Stats[scudo::StatCount] = {};
303+
Info.GlobalStats.get(Stats);
304+
*mapped = Stats[scudo::StatMapped];
305+
Stats[scudo::StatMapped] = 0;
306+
307+
// Make sure the allocation is aligned to a page boundary so that the checks
308+
// in the tests can avoid problems due to allocations having different
309+
// alignments.
310+
void *Ptr = Info.Allocator->allocate(Info.Options, Size, PageSize);
311+
EXPECT_NE(Ptr, nullptr);
312+
313+
Info.GlobalStats.get(Stats);
314+
EXPECT_GE(Stats[scudo::StatMapped], *mapped);
315+
*mapped = Stats[scudo::StatMapped] - *mapped;
316+
317+
Info.Allocator->deallocate(Info.Options, Ptr);
318+
319+
*guard_page_size = Info.Allocator->getGuardPageSize();
320+
}
321+
322+
TEST(ScudoSecondaryTest, VerifyGuardPageOption) {
323+
static scudo::uptr AllocSize = 1000 * PageSize;
324+
325+
// Verify that a config with guard pages enabled:
326+
// - Non-zero sized guard page
327+
// - Mapped in at least the size of the allocation plus 2 * guard page size
328+
scudo::uptr guard_mapped = 0;
329+
scudo::uptr guard_page_size = 0;
330+
testGetMappedSize<TestNoCacheConfig>(AllocSize, &guard_mapped,
331+
&guard_page_size);
332+
EXPECT_GT(guard_page_size, 0U);
333+
EXPECT_GE(guard_mapped, AllocSize + 2 * guard_page_size);
334+
335+
// Verify that a config with guard pages disabled:
336+
// - Zero sized guard page
337+
// - The total mapped in is greater than the allocation size
338+
scudo::uptr no_guard_mapped = 0;
339+
scudo::uptr no_guard_page_size = 0;
340+
testGetMappedSize<TestNoCacheNoGuardPageConfig>(AllocSize, &no_guard_mapped,
341+
&no_guard_page_size);
342+
EXPECT_EQ(no_guard_page_size, 0U);
343+
EXPECT_GE(no_guard_mapped, AllocSize);
344+
345+
// Verify that a guard page config mapped in at least twice the size of
346+
// their guard page when compared to a no guard page config.
347+
EXPECT_GE(guard_mapped, no_guard_mapped + guard_page_size * 2);
348+
}
349+
246350
// Value written to cache entries that are unmapped.
247351
static scudo::u32 UnmappedMarker = 0xDEADBEEF;
248352

0 commit comments

Comments
 (0)