Skip to content

Commit 6dd6d48

Browse files
authored
[NFC] Make RingBuffer an atomic pointer (#82547)
This will allow us to atomically swap out RingBuffer and StackDepot. Patched into AOSP and ran debuggerd_tests.
1 parent a64ff96 commit 6dd6d48

File tree

1 file changed

+86
-62
lines changed

1 file changed

+86
-62
lines changed

compiler-rt/lib/scudo/standalone/combined.h

Lines changed: 86 additions & 62 deletions
Original file line number · Diff line number · Diff line change
@@ -177,6 +177,18 @@ class Allocator {
177177
mapAndInitializeRingBuffer();
178178
}
179179

180+
void enableRingBuffer() {
181+
AllocationRingBuffer *RB = getRingBuffer();
182+
if (RB)
183+
RB->Depot->enable();
184+
}
185+
186+
void disableRingBuffer() {
187+
AllocationRingBuffer *RB = getRingBuffer();
188+
if (RB)
189+
RB->Depot->disable();
190+
}
191+
180192
// Initialize the embedded GWP-ASan instance. Requires the main allocator to
181193
// be functional, best called from PostInitCallback.
182194
void initGwpAsan() {
@@ -688,14 +700,12 @@ class Allocator {
688700
Quarantine.disable();
689701
Primary.disable();
690702
Secondary.disable();
691-
if (Depot)
692-
Depot->disable();
703+
disableRingBuffer();
693704
}
694705

695706
void enable() NO_THREAD_SAFETY_ANALYSIS {
696707
initThreadMaybe();
697-
if (Depot)
698-
Depot->enable();
708+
enableRingBuffer();
699709
Secondary.enable();
700710
Primary.enable();
701711
Quarantine.enable();
@@ -920,12 +930,14 @@ class Allocator {
920930

921931
const char *getStackDepotAddress() {
922932
initThreadMaybe();
923-
return reinterpret_cast<char *>(Depot);
933+
AllocationRingBuffer *RB = getRingBuffer();
934+
return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
924935
}
925936

926937
uptr getStackDepotSize() {
927938
initThreadMaybe();
928-
return StackDepotSize;
939+
AllocationRingBuffer *RB = getRingBuffer();
940+
return RB ? RB->StackDepotSize : 0;
929941
}
930942

931943
const char *getRegionInfoArrayAddress() const {
@@ -938,12 +950,15 @@ class Allocator {
938950

939951
const char *getRingBufferAddress() {
940952
initThreadMaybe();
941-
return RawRingBuffer;
953+
return reinterpret_cast<char *>(getRingBuffer());
942954
}
943955

944956
uptr getRingBufferSize() {
945957
initThreadMaybe();
946-
return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
958+
AllocationRingBuffer *RB = getRingBuffer();
959+
return RB && RB->RingBufferElements
960+
? ringBufferSizeInBytes(RB->RingBufferElements)
961+
: 0;
947962
}
948963

949964
static const uptr MaxTraceSize = 64;
@@ -1048,10 +1063,6 @@ class Allocator {
10481063
uptr GuardedAllocSlotSize = 0;
10491064
#endif // GWP_ASAN_HOOKS
10501065

1051-
StackDepot *Depot = nullptr;
1052-
uptr StackDepotSize = 0;
1053-
MemMapT RawStackDepotMap;
1054-
10551066
struct AllocationRingBuffer {
10561067
struct Entry {
10571068
atomic_uptr Ptr;
@@ -1061,16 +1072,23 @@ class Allocator {
10611072
atomic_u32 DeallocationTrace;
10621073
atomic_u32 DeallocationTid;
10631074
};
1064-
1075+
StackDepot *Depot = nullptr;
1076+
uptr StackDepotSize = 0;
1077+
MemMapT RawRingBufferMap;
1078+
MemMapT RawStackDepotMap;
1079+
u32 RingBufferElements = 0;
10651080
atomic_uptr Pos;
10661081
// An array of Size (at least one) elements of type Entry is immediately
10671082
// following to this struct.
10681083
};
10691084
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
10701085
// and immediately followed by Size elements of type Entry.
1071-
char *RawRingBuffer = {};
1072-
u32 RingBufferElements = 0;
1073-
MemMapT RawRingBufferMap;
1086+
atomic_uptr RingBufferAddress = {};
1087+
1088+
AllocationRingBuffer *getRingBuffer() {
1089+
return reinterpret_cast<AllocationRingBuffer *>(
1090+
atomic_load(&RingBufferAddress, memory_order_acquire));
1091+
}
10741092

10751093
// The following might get optimized out by the compiler.
10761094
NOINLINE void performSanityChecks() {
@@ -1259,27 +1277,24 @@ class Allocator {
12591277
storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
12601278
}
12611279

1262-
StackDepot *getDepotIfEnabled(const Options &Options) {
1263-
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1264-
return nullptr;
1265-
return Depot;
1266-
}
1267-
12681280
void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
1269-
auto *Depot = getDepotIfEnabled(Options);
1270-
if (!Depot)
1281+
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1282+
return;
1283+
AllocationRingBuffer *RB = getRingBuffer();
1284+
if (!RB)
12711285
return;
12721286
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
1273-
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
1287+
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
12741288
Ptr32[MemTagAllocationTidIndex] = getThreadID();
12751289
}
12761290

1277-
void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
1291+
void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
1292+
u32 AllocationTrace, u32 AllocationTid,
12781293
uptr AllocationSize, u32 DeallocationTrace,
12791294
u32 DeallocationTid) {
1280-
uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
1295+
uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
12811296
typename AllocationRingBuffer::Entry *Entry =
1282-
getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
1297+
getRingBufferEntry(RB, Pos % RB->RingBufferElements);
12831298

12841299
// First invalidate our entry so that we don't attempt to interpret a
12851300
// partially written state in getSecondaryErrorInfo(). The fences below
@@ -1300,32 +1315,36 @@ class Allocator {
13001315

13011316
void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
13021317
uptr Size) {
1303-
auto *Depot = getDepotIfEnabled(Options);
1304-
if (!Depot)
1318+
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
13051319
return;
1306-
u32 Trace = collectStackTrace(Depot);
1320+
AllocationRingBuffer *RB = getRingBuffer();
1321+
if (!RB)
1322+
return;
1323+
u32 Trace = collectStackTrace(RB->Depot);
13071324
u32 Tid = getThreadID();
13081325

13091326
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
13101327
Ptr32[MemTagAllocationTraceIndex] = Trace;
13111328
Ptr32[MemTagAllocationTidIndex] = Tid;
13121329

1313-
storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
1330+
storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
13141331
}
13151332

13161333
void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
13171334
u8 PrevTag, uptr Size) {
1318-
auto *Depot = getDepotIfEnabled(Options);
1319-
if (!Depot)
1335+
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
1336+
return;
1337+
AllocationRingBuffer *RB = getRingBuffer();
1338+
if (!RB)
13201339
return;
13211340
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
13221341
u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
13231342
u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
13241343

1325-
u32 DeallocationTrace = collectStackTrace(Depot);
1344+
u32 DeallocationTrace = collectStackTrace(RB->Depot);
13261345
u32 DeallocationTid = getThreadID();
13271346

1328-
storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
1347+
storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
13291348
AllocationTrace, AllocationTid, Size,
13301349
DeallocationTrace, DeallocationTid);
13311350
}
@@ -1434,7 +1453,7 @@ class Allocator {
14341453
for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
14351454
NextErrorReport != NumErrorReports;
14361455
--I) {
1437-
auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
1456+
auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
14381457
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
14391458
if (!EntryPtr)
14401459
continue;
@@ -1502,14 +1521,18 @@ class Allocator {
15021521
}
15031522

15041523
static typename AllocationRingBuffer::Entry *
1505-
getRingBufferEntry(char *RawRingBuffer, uptr N) {
1524+
getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
1525+
char *RBEntryStart =
1526+
&reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
15061527
return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
1507-
&RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1528+
RBEntryStart)[N];
15081529
}
15091530
static const typename AllocationRingBuffer::Entry *
1510-
getRingBufferEntry(const char *RawRingBuffer, uptr N) {
1531+
getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
1532+
const char *RBEntryStart =
1533+
&reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
15111534
return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
1512-
&RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
1535+
RBEntryStart)[N];
15131536
}
15141537

15151538
void mapAndInitializeRingBuffer() {
@@ -1549,42 +1572,47 @@ class Allocator {
15491572
u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
15501573
DCHECK(isPowerOfTwo(RingSize));
15511574

1552-
StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
1553-
sizeof(atomic_u32) * TabSize;
1575+
uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
1576+
sizeof(atomic_u32) * TabSize;
15541577
MemMapT DepotMap;
15551578
DepotMap.map(
15561579
/*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
15571580
"scudo:stack_depot");
1558-
Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
1581+
auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
15591582
Depot->init(RingSize, TabSize);
1560-
RawStackDepotMap = DepotMap;
15611583

15621584
MemMapT MemMap;
15631585
MemMap.map(
15641586
/*Addr=*/0U,
15651587
roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
15661588
getPageSizeCached()),
15671589
"scudo:ring_buffer");
1568-
RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
1569-
RawRingBufferMap = MemMap;
1570-
RingBufferElements = AllocationRingBufferSize;
1590+
auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
1591+
RB->RawRingBufferMap = MemMap;
1592+
RB->RingBufferElements = AllocationRingBufferSize;
1593+
RB->Depot = Depot;
1594+
RB->StackDepotSize = StackDepotSize;
1595+
RB->RawStackDepotMap = DepotMap;
1596+
1597+
atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
1598+
memory_order_release);
15711599
static_assert(sizeof(AllocationRingBuffer) %
15721600
alignof(typename AllocationRingBuffer::Entry) ==
15731601
0,
15741602
"invalid alignment");
15751603
}
15761604

15771605
void unmapRingBuffer() {
1578-
auto *RingBuffer = getRingBuffer();
1579-
if (RingBuffer != nullptr) {
1580-
RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
1581-
RawRingBufferMap.getCapacity());
1582-
}
1583-
RawRingBuffer = nullptr;
1584-
if (Depot) {
1585-
RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
1586-
RawStackDepotMap.getCapacity());
1587-
}
1606+
AllocationRingBuffer *RB = getRingBuffer();
1607+
if (RB == nullptr)
1608+
return;
1609+
// N.B. because RawStackDepotMap is part of RawRingBufferMap, the order
1610+
// is very important.
1611+
RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
1612+
RB->RawStackDepotMap.getCapacity());
1613+
RB->RawRingBufferMap.unmap(RB->RawRingBufferMap.getBase(),
1614+
RB->RawRingBufferMap.getCapacity());
1615+
atomic_store(&RingBufferAddress, 0, memory_order_release);
15881616
}
15891617

15901618
static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
@@ -1599,10 +1627,6 @@ class Allocator {
15991627
return (Bytes - sizeof(AllocationRingBuffer)) /
16001628
sizeof(typename AllocationRingBuffer::Entry);
16011629
}
1602-
1603-
inline AllocationRingBuffer *getRingBuffer() {
1604-
return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
1605-
}
16061630
};
16071631

16081632
} // namespace scudo

0 commit comments

Comments (0)