@@ -177,6 +177,18 @@ class Allocator {
     mapAndInitializeRingBuffer();
   }
 
+  void enableRingBuffer() {
+    AllocationRingBuffer *RB = getRingBuffer();
+    if (RB)
+      RB->Depot->enable();
+  }
+
+  void disableRingBuffer() {
+    AllocationRingBuffer *RB = getRingBuffer();
+    if (RB)
+      RB->Depot->disable();
+  }
+
   // Initialize the embedded GWP-ASan instance. Requires the main allocator to
   // be functional, best called from PostInitCallback.
   void initGwpAsan() {
@@ -688,14 +700,12 @@ class Allocator {
     Quarantine.disable();
     Primary.disable();
     Secondary.disable();
-    if (Depot)
-      Depot->disable();
+    disableRingBuffer();
   }
 
   void enable() NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
-    if (Depot)
-      Depot->enable();
+    enableRingBuffer();
     Secondary.enable();
     Primary.enable();
     Quarantine.enable();
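
For context, this allocator-wide disable()/enable() pair is the kind of hook a libc brackets fork() with so that no allocator lock, including the depot's, is held in the child. A hypothetical standalone sketch of such wiring; pthread_atfork, the AllocatorLike stand-in, and GlobalAllocator are illustrative assumptions, not code from this patch:

    #include <pthread.h>

    // Stand-in for the real allocator; the bodies would take and release the
    // Quarantine, Primary, Secondary and depot locks as in the diff above.
    struct AllocatorLike {
      void disable() { /* would lock everything, including the depot */ }
      void enable() { /* would unlock everything, in reverse order */ }
    };

    AllocatorLike GlobalAllocator;

    void installForkHandlers() {
      pthread_atfork([] { GlobalAllocator.disable(); }, // prepare: parent, before fork
                     [] { GlobalAllocator.enable(); },  // parent, after fork
                     [] { GlobalAllocator.enable(); }); // child, after fork
    }
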
@@ -920,12 +930,14 @@ class Allocator {
 
   const char *getStackDepotAddress() {
     initThreadMaybe();
-    return reinterpret_cast<char *>(Depot);
+    AllocationRingBuffer *RB = getRingBuffer();
+    return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
   }
 
   uptr getStackDepotSize() {
     initThreadMaybe();
-    return StackDepotSize;
+    AllocationRingBuffer *RB = getRingBuffer();
+    return RB ? RB->StackDepotSize : 0;
   }
 
   const char *getRegionInfoArrayAddress() const {
@@ -938,12 +950,15 @@ class Allocator {
 
   const char *getRingBufferAddress() {
     initThreadMaybe();
-    return RawRingBuffer;
+    return reinterpret_cast<char *>(getRingBuffer());
   }
 
   uptr getRingBufferSize() {
     initThreadMaybe();
-    return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
+    AllocationRingBuffer *RB = getRingBuffer();
+    return RB && RB->RingBufferElements
+               ? ringBufferSizeInBytes(RB->RingBufferElements)
+               : 0;
   }
 
   static const uptr MaxTraceSize = 64;
@@ -1048,10 +1063,6 @@ class Allocator {
   uptr GuardedAllocSlotSize = 0;
 #endif // GWP_ASAN_HOOKS
 
-  StackDepot *Depot = nullptr;
-  uptr StackDepotSize = 0;
-  MemMapT RawStackDepotMap;
-
   struct AllocationRingBuffer {
     struct Entry {
       atomic_uptr Ptr;
@@ -1061,16 +1072,23 @@ class Allocator {
       atomic_u32 DeallocationTrace;
       atomic_u32 DeallocationTid;
     };
-
+    StackDepot *Depot = nullptr;
+    uptr StackDepotSize = 0;
+    MemMapT RawRingBufferMap;
+    MemMapT RawStackDepotMap;
+    u32 RingBufferElements = 0;
     atomic_uptr Pos;
     // An array of Size (at least one) elements of type Entry is immediately
     // following to this struct.
   };
   // Pointer to memory mapped area starting with AllocationRingBuffer struct,
   // and immediately followed by Size elements of type Entry.
-  char *RawRingBuffer = {};
-  u32 RingBufferElements = 0;
-  MemMapT RawRingBufferMap;
+  atomic_uptr RingBufferAddress = {};
+
+  AllocationRingBuffer *getRingBuffer() {
+    return reinterpret_cast<AllocationRingBuffer *>(
+        atomic_load(&RingBufferAddress, memory_order_acquire));
+  }
 
   // The following might get optimized out by the compiler.
   NOINLINE void performSanityChecks() {
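
The raw char * ring-buffer pointer is replaced above by an atomic address plus an acquire-loading getter, so a ring buffer initialized on one thread can be observed safely from another. A minimal standalone sketch of that publish/consume pattern, using std::atomic rather than scudo's atomic_uptr helpers; the names are illustrative:

    #include <atomic>
    #include <cstdint>

    struct RingBufferHeader {
      std::uint32_t Elements; // stands in for Depot, RingBufferElements, ...
    };

    // Zero means "not initialized yet". A reader that sees a non-zero address
    // is guaranteed, by the release/acquire pairing, to also see the header
    // fields written before the address was published.
    std::atomic<std::uintptr_t> PublishedAddress{0};

    RingBufferHeader *getPublished() {
      return reinterpret_cast<RingBufferHeader *>(
          PublishedAddress.load(std::memory_order_acquire));
    }

    void publish(RingBufferHeader *RB) {
      RB->Elements = 1024; // initialize the header first ...
      PublishedAddress.store(reinterpret_cast<std::uintptr_t>(RB),
                             std::memory_order_release); // ... then publish
    }
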
@@ -1259,27 +1277,24 @@ class Allocator {
     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }
 
-  StackDepot *getDepotIfEnabled(const Options &Options) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
-      return nullptr;
-    return Depot;
-  }
-
   void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+      return;
+    AllocationRingBuffer *RB = getRingBuffer();
+    if (!RB)
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
-  void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
+  void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
+                            u32 AllocationTrace, u32 AllocationTid,
                             uptr AllocationSize, u32 DeallocationTrace,
                             u32 DeallocationTid) {
-    uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
+    uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
     typename AllocationRingBuffer::Entry *Entry =
-        getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
+        getRingBufferEntry(RB, Pos % RB->RingBufferElements);
 
     // First invalidate our entry so that we don't attempt to interpret a
     // partially written state in getSecondaryErrorInfo(). The fences below
@@ -1300,32 +1315,36 @@ class Allocator {
 
   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                           uptr Size) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
-    u32 Trace = collectStackTrace(Depot);
+    AllocationRingBuffer *RB = getRingBuffer();
+    if (!RB)
+      return;
+    u32 Trace = collectStackTrace(RB->Depot);
     u32 Tid = getThreadID();
 
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     Ptr32[MemTagAllocationTraceIndex] = Trace;
     Ptr32[MemTagAllocationTidIndex] = Tid;
 
-    storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
+    storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
   }
 
   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                    u8 PrevTag, uptr Size) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+      return;
+    AllocationRingBuffer *RB = getRingBuffer();
+    if (!RB)
      return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
 
-    u32 DeallocationTrace = collectStackTrace(Depot);
+    u32 DeallocationTrace = collectStackTrace(RB->Depot);
     u32 DeallocationTid = getThreadID();
 
-    storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
+    storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
                          AllocationTrace, AllocationTid, Size,
                          DeallocationTrace, DeallocationTid);
   }
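
storeRingBufferEntry() in the hunks above now receives the ring buffer explicitly, but keeps the existing entry-write protocol: claim a slot with a fetch-add on Pos, invalidate the slot's pointer, fill the fields between release fences, then republish the pointer. A standalone sketch of that protocol using std::atomic, with simplified field names; the exact fence placement in scudo sits in code not shown in this hunk:

    #include <atomic>
    #include <cstdint>

    struct EntryLike {
      std::atomic<std::uintptr_t> Ptr{0};
      std::atomic<std::uint32_t> AllocationTrace{0};
      std::atomic<std::uint32_t> AllocationTid{0};
    };

    void writeEntry(EntryLike &E, std::uintptr_t P, std::uint32_t Trace,
                    std::uint32_t Tid) {
      // Invalidate first so a concurrent reader never mixes old and new fields.
      E.Ptr.store(0, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_release);
      E.AllocationTrace.store(Trace, std::memory_order_relaxed);
      E.AllocationTid.store(Tid, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_release);
      // Re-publishing a non-zero pointer marks the entry as valid again.
      E.Ptr.store(P, std::memory_order_relaxed);
    }
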
@@ -1434,7 +1453,7 @@ class Allocator {
     for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
                            NextErrorReport != NumErrorReports;
          --I) {
-      auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
+      auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
       uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
       if (!EntryPtr)
         continue;
@@ -1502,14 +1521,18 @@ class Allocator {
   }
 
   static typename AllocationRingBuffer::Entry *
-  getRingBufferEntry(char *RawRingBuffer, uptr N) {
+  getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
+    char *RBEntryStart =
+        &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
     return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
-        &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
+        RBEntryStart)[N];
   }
   static const typename AllocationRingBuffer::Entry *
-  getRingBufferEntry(const char *RawRingBuffer, uptr N) {
+  getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
+    const char *RBEntryStart =
+        &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
     return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
-        &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
+        RBEntryStart)[N];
   }
 
   void mapAndInitializeRingBuffer() {
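
Both getRingBufferEntry() overloads now take the ring-buffer header and index the Entry array that sits immediately after it in the same mapping. A standalone sketch of this trailing-array layout, using plain calloc instead of scudo's MemMapT; the type and field names are illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    struct Header {
      std::uint64_t Pos;
      std::uint32_t Elements;
      std::uint32_t Padding;
      // Entry records follow immediately after this struct in the allocation.
    };
    struct Entry {
      std::uintptr_t Ptr;
    };
    static_assert(sizeof(Header) % alignof(Entry) == 0, "invalid alignment");

    Entry *entryAt(Header *H, std::size_t N) {
      char *Start = reinterpret_cast<char *>(H) + sizeof(Header);
      return reinterpret_cast<Entry *>(Start) + N;
    }

    Header *makeBuffer(std::uint32_t Elements) {
      std::size_t Bytes = sizeof(Header) + Elements * sizeof(Entry);
      auto *H = static_cast<Header *>(std::calloc(1, Bytes)); // zero-filled
      H->Elements = Elements;
      return H;
    }
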
@@ -1549,42 +1572,47 @@ class Allocator {
     u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
     DCHECK(isPowerOfTwo(RingSize));
 
-    StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
-                     sizeof(atomic_u32) * TabSize;
+    uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
+                          sizeof(atomic_u32) * TabSize;
     MemMapT DepotMap;
     DepotMap.map(
         /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
         "scudo:stack_depot");
-    Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+    auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
     Depot->init(RingSize, TabSize);
-    RawStackDepotMap = DepotMap;
 
     MemMapT MemMap;
     MemMap.map(
         /*Addr=*/0U,
         roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
                 getPageSizeCached()),
         "scudo:ring_buffer");
-    RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
-    RawRingBufferMap = MemMap;
-    RingBufferElements = AllocationRingBufferSize;
+    auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
+    RB->RawRingBufferMap = MemMap;
+    RB->RingBufferElements = AllocationRingBufferSize;
+    RB->Depot = Depot;
+    RB->StackDepotSize = StackDepotSize;
+    RB->RawStackDepotMap = DepotMap;
+
+    atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
+                 memory_order_release);
     static_assert(sizeof(AllocationRingBuffer) %
                           alignof(typename AllocationRingBuffer::Entry) ==
                       0,
                   "invalid alignment");
   }
 
   void unmapRingBuffer() {
-    auto *RingBuffer = getRingBuffer();
-    if (RingBuffer != nullptr) {
-      RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
-                             RawRingBufferMap.getCapacity());
-    }
-    RawRingBuffer = nullptr;
-    if (Depot) {
-      RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
-                             RawStackDepotMap.getCapacity());
-    }
+    AllocationRingBuffer *RB = getRingBuffer();
+    if (RB == nullptr)
+      return;
+    // N.B. because RawStackDepotMap is part of RawRingBufferMap, the order
+    // is very important.
+    RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
+                               RB->RawStackDepotMap.getCapacity());
+    RB->RawRingBufferMap.unmap(RB->RawRingBufferMap.getBase(),
+                               RB->RawRingBufferMap.getCapacity());
+    atomic_store(&RingBufferAddress, 0, memory_order_release);
   }
 
   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
@@ -1599,10 +1627,6 @@ class Allocator {
     return (Bytes - sizeof(AllocationRingBuffer)) /
            sizeof(typename AllocationRingBuffer::Entry);
   }
-
-  inline AllocationRingBuffer *getRingBuffer() {
-    return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
-  }
 };
 
 } // namespace scudo
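
One subtlety in the new unmapRingBuffer(): the depot's MemMapT handle (RawStackDepotMap) is a field of the AllocationRingBuffer header, so it lives inside the ring-buffer mapping itself, and the depot must therefore be unmapped before the mapping that stores its handle. A simplified standalone illustration using POSIX munmap; the Mapping and Buffer types are assumptions for the sketch, not scudo's MemMapT API:

    #include <sys/mman.h>
    #include <cstddef>

    struct Mapping {
      void *Base;
      std::size_t Size;
    };

    // The outer mapping's header stores the handle of a second, inner mapping,
    // mirroring how RB->RawStackDepotMap lives inside the ring-buffer pages.
    struct Buffer {
      Mapping DepotMap;
      std::size_t OwnSize;
    };

    void destroy(Buffer *B) {
      // Copy out everything reachable through *B before releasing the pages it
      // lives in, then unmap inner before outer.
      Mapping Depot = B->DepotMap;
      std::size_t OwnSize = B->OwnSize;
      munmap(Depot.Base, Depot.Size); // depot first
      munmap(B, OwnSize);             // then the mapping that held its handle
    }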