@@ -9,6 +9,7 @@
 #ifndef SCUDO_COMBINED_H_
 #define SCUDO_COMBINED_H_
 
+#include "atomic_helpers.h"
 #include "chunk.h"
 #include "common.h"
 #include "flags.h"
@@ -282,15 +283,15 @@ class Allocator {
     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
   }
 
-  NOINLINE u32 collectStackTrace() {
+  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
     // Discard collectStackTrace() frame and allocator function frame.
     constexpr uptr DiscardFrames = 2;
     uptr Stack[MaxTraceSize + DiscardFrames];
     uptr Size =
         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
-    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
 #else
     return 0;
 #endif
@@ -687,12 +688,12 @@ class Allocator {
     Quarantine.disable();
     Primary.disable();
     Secondary.disable();
-    Depot.disable();
+    Depot->disable();
   }
 
   void enable() NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
-    Depot.enable();
+    Depot->enable();
     Secondary.enable();
     Primary.enable();
     Quarantine.enable();
@@ -915,8 +916,14 @@ class Allocator {
       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
   }
 
-  const char *getStackDepotAddress() const {
-    return reinterpret_cast<const char *>(&Depot);
+  const char *getStackDepotAddress() {
+    initThreadMaybe();
+    return reinterpret_cast<char *>(Depot);
+  }
+
+  uptr getStackDepotSize() {
+    initThreadMaybe();
+    return StackDepotSize;
   }
 
   const char *getRegionInfoArrayAddress() const {
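Note: since the depot is no longer an inline member, a consumer has to pair the two accessors above when it copies the depot out. A minimal sketch of that pairing, assuming only what this hunk shows; the helper name snapshotStackDepot and the plain memcpy copy are illustrative, not part of the patch:

#include <cstring>
#include <vector>

// Illustrative only: snapshot the depot bytes together with their size so
// both can later be handed to getErrorInfo(), which now takes DepotPtr and
// DepotSize as a pair.
template <class AllocatorT>
std::vector<char> snapshotStackDepot(AllocatorT &Allocator) {
  const char *Addr = Allocator.getStackDepotAddress();
  const size_t Size = static_cast<size_t>(Allocator.getStackDepotSize());
  std::vector<char> Copy(Size);
  if (Size != 0 && Addr != nullptr)
    std::memcpy(Copy.data(), Addr, Size);
  return Copy; // Copy.data()/Copy.size() become DepotPtr/DepotSize.
}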
@@ -945,21 +952,35 @@ class Allocator {
     if (!Depot->find(Hash, &RingPos, &Size))
       return;
     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
-      Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
+      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
   }
 
   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                            uintptr_t FaultAddr, const char *DepotPtr,
-                           const char *RegionInfoPtr, const char *RingBufferPtr,
-                           size_t RingBufferSize, const char *Memory,
-                           const char *MemoryTags, uintptr_t MemoryAddr,
-                           size_t MemorySize) {
+                           size_t DepotSize, const char *RegionInfoPtr,
+                           const char *RingBufferPtr, size_t RingBufferSize,
+                           const char *Memory, const char *MemoryTags,
+                           uintptr_t MemoryAddr, size_t MemorySize) {
+    // N.B. we need to support corrupted data in any of the buffers here. We get
+    // this information from an external process (the crashing process) that
+    // should not be able to crash the crash dumper (crash_dump on Android).
+    // See also the get_error_info_fuzzer.
     *ErrorInfo = {};
     if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;
 
-    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+    const StackDepot *Depot = nullptr;
+    if (DepotPtr) {
+      // Check for a corrupted StackDepot. First we need to check whether we
+      // can read the metadata, then whether the metadata matches the size.
+      if (DepotSize < sizeof(*Depot))
+        return;
+      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+      if (!Depot->isValid(DepotSize))
+        return;
+    }
+
     size_t NextErrorReport = 0;
 
     // Check for OOB in the current block and the two surrounding blocks. Beyond
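Note: the new DepotPtr/DepotSize validation has a two-step shape: first confirm the buffer is large enough to read the depot metadata at all, then ask the metadata (via StackDepot::isValid) whether it is consistent with the number of bytes actually supplied. Below is a minimal standalone model of that shape, with a hypothetical DepotHeader layout standing in for the real StackDepot metadata:

#include <cstddef>
#include <cstdint>

// Hypothetical metadata layout, for illustration only.
struct DepotHeader {
  uint32_t RingSize; // number of 64-bit ring slots recorded in the metadata
  uint32_t TabSize;  // number of 32-bit hash-table slots
};

// Mirrors the two-step check above: readable metadata first, then a size
// that adds up to the buffer the (untrusted) crashing process handed over.
static bool depotLooksValid(const char *Ptr, size_t Size) {
  if (Ptr == nullptr || Size < sizeof(DepotHeader))
    return false;
  const auto *Header = reinterpret_cast<const DepotHeader *>(Ptr);
  const uint64_t Expected = sizeof(DepotHeader) +
                            sizeof(uint64_t) * Header->RingSize +
                            sizeof(uint32_t) * Header->TabSize;
  return Expected == Size;
}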
@@ -1025,7 +1046,9 @@ class Allocator {
   uptr GuardedAllocSlotSize = 0;
 #endif // GWP_ASAN_HOOKS
 
-  StackDepot Depot;
+  StackDepot *Depot = nullptr;
+  uptr StackDepotSize = 0;
+  MemMapT RawStackDepotMap;
 
   struct AllocationRingBuffer {
     struct Entry {
@@ -1234,11 +1257,18 @@ class Allocator {
     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }
 
-  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+  StackDepot *getDepotIfEnabled(const Options &Options) {
     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+      return nullptr;
+    return Depot;
+  }
+
+  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
@@ -1268,10 +1298,10 @@ class Allocator {
 
   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                           uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
-
-    u32 Trace = collectStackTrace();
+    u32 Trace = collectStackTrace(Depot);
     u32 Tid = getThreadID();
 
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1283,14 +1313,14 @@ class Allocator {
 
   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                    u8 PrevTag, uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
-
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
 
-    u32 DeallocationTrace = collectStackTrace();
+    u32 DeallocationTrace = collectStackTrace(Depot);
     u32 DeallocationTid = getThreadID();
 
     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
@@ -1369,8 +1399,10 @@ class Allocator {
             UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
         R->allocation_address = ChunkAddr;
         R->allocation_size = Header.SizeOrUnusedBytes;
-        collectTraceMaybe(Depot, R->allocation_trace,
-                          Data[MemTagAllocationTraceIndex]);
+        if (Depot) {
+          collectTraceMaybe(Depot, R->allocation_trace,
+                            Data[MemTagAllocationTraceIndex]);
+        }
         R->allocation_tid = Data[MemTagAllocationTidIndex];
         return NextErrorReport == NumErrorReports;
       };
@@ -1393,7 +1425,7 @@ class Allocator {
     auto *RingBuffer =
         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
-    if (!RingBuffer || RingBufferElements == 0)
+    if (!RingBuffer || RingBufferElements == 0 || !Depot)
       return;
     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
 
@@ -1483,6 +1515,43 @@ class Allocator {
       return;
     u32 AllocationRingBufferSize =
         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
+
+    // We store alloc and free stacks for each entry.
+    constexpr u32 kStacksPerRingBufferEntry = 2;
+    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
+    static_assert(isPowerOfTwo(kMaxU32Pow2));
+    constexpr u32 kFramesPerStack = 8;
+    static_assert(isPowerOfTwo(kFramesPerStack));
+
+    // We need StackDepot to be aligned to 8 bytes so the ring we store after
+    // it is correctly aligned.
+    static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
+
+    // Make sure the maximum sized StackDepot fits within a uintptr_t to
+    // simplify the overflow checking.
+    static_assert(sizeof(StackDepot) + UINT32_MAX * sizeof(atomic_u64) *
+                                           UINT32_MAX * sizeof(atomic_u32) <
+                  UINTPTR_MAX);
+
+    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
+      return;
+    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
+                                                     AllocationRingBufferSize));
+    if (TabSize > UINT32_MAX / kFramesPerStack)
+      return;
+    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
+    DCHECK(isPowerOfTwo(RingSize));
+
+    StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
+                     sizeof(atomic_u32) * TabSize;
+    MemMapT DepotMap;
+    DepotMap.map(
+        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
+        "scudo:stack_depot");
+    Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+    Depot->init(RingSize, TabSize);
+    RawStackDepotMap = DepotMap;
+
     MemMapT MemMap;
     MemMap.map(
         /*Addr=*/0U,
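Note: the sizing above is plain integer arithmetic on the ring-buffer entry count: two stacks per entry give the hash-table size (rounded up to a power of two), eight frames per stack give the ring size, and the depot byte size is the metadata plus one atomic_u64 per ring slot plus one atomic_u32 per table slot. A standalone sketch of that arithmetic follows; the 32768-entry ring buffer and the 64-byte metadata size are example values, not values taken from the patch:

#include <cstdint>
#include <cstdio>

static uint32_t roundUpPowerOfTwo(uint32_t X) {
  uint32_t P = 1;
  while (P < X)
    P <<= 1;
  return P;
}

int main() {
  const uint32_t kStacksPerRingBufferEntry = 2;
  const uint32_t kFramesPerStack = 8;
  const uint64_t kDepotMetadataBytes = 64; // stand-in for sizeof(StackDepot)

  const uint32_t RingBufferEntries = 32768; // example allocation_ring_buffer_size
  const uint32_t TabSize =
      roundUpPowerOfTwo(kStacksPerRingBufferEntry * RingBufferEntries); // 65536
  const uint32_t RingSize = TabSize * kFramesPerStack;                  // 524288
  const uint64_t DepotSize = kDepotMetadataBytes +
                             sizeof(uint64_t) * RingSize +
                             sizeof(uint32_t) * TabSize; // ~4.25 MiB of stacks
  std::printf("TabSize=%u RingSize=%u DepotSize=%llu bytes\n", TabSize, RingSize,
              static_cast<unsigned long long>(DepotSize));
}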
@@ -1505,6 +1574,10 @@ class Allocator {
                              RawRingBufferMap.getCapacity());
     }
     RawRingBuffer = nullptr;
+    if (Depot) {
+      RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
+                             RawStackDepotMap.getCapacity());
+    }
   }
 
   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {