 #ifndef SCUDO_COMBINED_H_
 #define SCUDO_COMBINED_H_

-#include "atomic_helpers.h"
 #include "chunk.h"
 #include "common.h"
 #include "flags.h"
@@ -283,15 +282,15 @@ class Allocator {
     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
   }

-  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
+  NOINLINE u32 collectStackTrace() {
 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
     // Discard collectStackTrace() frame and allocator function frame.
     constexpr uptr DiscardFrames = 2;
     uptr Stack[MaxTraceSize + DiscardFrames];
     uptr Size =
         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
-    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
 #else
     return 0;
 #endif
@@ -688,12 +687,12 @@ class Allocator {
     Quarantine.disable();
     Primary.disable();
     Secondary.disable();
-    Depot->disable();
+    Depot.disable();
   }

   void enable() NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
-    Depot->enable();
+    Depot.enable();
     Secondary.enable();
     Primary.enable();
     Quarantine.enable();
@@ -916,14 +915,8 @@ class Allocator {
     Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
   }

-  const char *getStackDepotAddress() {
-    initThreadMaybe();
-    return reinterpret_cast<char *>(Depot);
-  }
-
-  uptr getStackDepotSize() {
-    initThreadMaybe();
-    return StackDepotSize;
+  const char *getStackDepotAddress() const {
+    return reinterpret_cast<const char *>(&Depot);
   }

   const char *getRegionInfoArrayAddress() const {
@@ -952,35 +945,21 @@ class Allocator {
     if (!Depot->find(Hash, &RingPos, &Size))
       return;
     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
-      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
+      Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
   }

   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                            uintptr_t FaultAddr, const char *DepotPtr,
-                           size_t DepotSize, const char *RegionInfoPtr,
-                           const char *RingBufferPtr, size_t RingBufferSize,
-                           const char *Memory, const char *MemoryTags,
-                           uintptr_t MemoryAddr, size_t MemorySize) {
-    // N.B. we need to support corrupted data in any of the buffers here. We get
-    // this information from an external process (the crashing process) that
-    // should not be able to crash the crash dumper (crash_dump on Android).
-    // See also the get_error_info_fuzzer.
+                           const char *RegionInfoPtr, const char *RingBufferPtr,
+                           size_t RingBufferSize, const char *Memory,
+                           const char *MemoryTags, uintptr_t MemoryAddr,
+                           size_t MemorySize) {
     *ErrorInfo = {};
     if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;

-    const StackDepot *Depot = nullptr;
-    if (DepotPtr) {
-      // check for corrupted StackDepot. First we need to check whether we can
-      // read the metadata, then whether the metadata matches the size.
-      if (DepotSize < sizeof(*Depot))
-        return;
-      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
-      if (!Depot->isValid(DepotSize))
-        return;
-    }
-
+    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
     size_t NextErrorReport = 0;

     // Check for OOB in the current block and the two surrounding blocks. Beyond
@@ -1046,9 +1025,7 @@ class Allocator {
   uptr GuardedAllocSlotSize = 0;
 #endif // GWP_ASAN_HOOKS

-  StackDepot *Depot = nullptr;
-  uptr StackDepotSize = 0;
-  MemMapT RawStackDepotMap;
+  StackDepot Depot;

   struct AllocationRingBuffer {
     struct Entry {
@@ -1257,18 +1234,11 @@ class Allocator {
     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }

-  StackDepot *getDepotIfEnabled(const Options &Options) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
-      return nullptr;
-    return Depot;
-  }
-
   void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }

@@ -1298,10 +1268,10 @@ class Allocator {

   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                           uptr Size) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
-    u32 Trace = collectStackTrace(Depot);
+
+    u32 Trace = collectStackTrace();
     u32 Tid = getThreadID();

     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1313,14 +1283,14 @@ class Allocator {

   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                    u8 PrevTag, uptr Size) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
+
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

-    u32 DeallocationTrace = collectStackTrace(Depot);
+    u32 DeallocationTrace = collectStackTrace();
     u32 DeallocationTid = getThreadID();

     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
@@ -1399,10 +1369,8 @@ class Allocator {
           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
       R->allocation_address = ChunkAddr;
       R->allocation_size = Header.SizeOrUnusedBytes;
-      if (Depot) {
-        collectTraceMaybe(Depot, R->allocation_trace,
-                          Data[MemTagAllocationTraceIndex]);
-      }
+      collectTraceMaybe(Depot, R->allocation_trace,
+                        Data[MemTagAllocationTraceIndex]);
       R->allocation_tid = Data[MemTagAllocationTidIndex];
       return NextErrorReport == NumErrorReports;
     };
@@ -1425,7 +1393,7 @@ class Allocator {
     auto *RingBuffer =
         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
-    if (!RingBuffer || RingBufferElements == 0 || !Depot)
+    if (!RingBuffer || RingBufferElements == 0)
       return;
     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

@@ -1515,43 +1483,6 @@ class Allocator {
       return;
     u32 AllocationRingBufferSize =
         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
-
-    // We store alloc and free stacks for each entry.
-    constexpr u32 kStacksPerRingBufferEntry = 2;
-    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
-    static_assert(isPowerOfTwo(kMaxU32Pow2));
-    constexpr u32 kFramesPerStack = 8;
-    static_assert(isPowerOfTwo(kFramesPerStack));
-
-    // We need StackDepot to be aligned to 8-bytes so the ring we store after
-    // is correctly assigned.
-    static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
-
-    // Make sure the maximum sized StackDepot fits withint a uintptr_t to
-    // simplify the overflow checking.
-    static_assert(sizeof(StackDepot) + UINT32_MAX * sizeof(atomic_u64) *
-                                           UINT32_MAX * sizeof(atomic_u32) <
-                  UINTPTR_MAX);
-
-    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
-      return;
-    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
-                                                     AllocationRingBufferSize));
-    if (TabSize > UINT32_MAX / kFramesPerStack)
-      return;
-    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
-    DCHECK(isPowerOfTwo(RingSize));
-
-    StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
-                     sizeof(atomic_u32) * TabSize;
-    MemMapT DepotMap;
-    DepotMap.map(
-        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
-        "scudo:stack_depot");
-    Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
-    Depot->init(RingSize, TabSize);
-    RawStackDepotMap = DepotMap;
-
     MemMapT MemMap;
     MemMap.map(
         /*Addr=*/0U,
@@ -1574,10 +1505,6 @@ class Allocator {
                              RawRingBufferMap.getCapacity());
     }
     RawRingBuffer = nullptr;
-    if (Depot) {
-      RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
-                             RawStackDepotMap.getCapacity());
-    }
   }

   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
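
For readers skimming the diff, the sketch below illustrates the ownership pattern this change applies: the depot moves from a lazily mapped pointer member to a plain embedded member, so call sites switch from pointer syntax (->) to member syntax (.), the separate size getter disappears, and no map/unmap bookkeeping is needed. ToyDepot and ToyAllocator are hypothetical stand-ins, not the Scudo classes; only the shape of the change is taken from the diff above.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a statically sized stack depot.
struct ToyDepot {
  std::atomic<uint32_t> Counter{0};
  uint32_t insert(uint32_t Frame) {
    // Stand-in for a depot insert: just count insertions.
    Counter.fetch_add(1, std::memory_order_relaxed);
    return Frame;
  }
};

class ToyAllocator {
public:
  uint32_t collectStackTrace() {
    // The depot is always present, so there is no depot parameter and no
    // null check; the embedded member is used directly.
    return Depot.insert(0x1234);
  }

  const char *getStackDepotAddress() const {
    // Mirrors the new accessor in the diff: the address of the embedded
    // member; its size is simply sizeof(ToyDepot), so no size getter.
    return reinterpret_cast<const char *>(&Depot);
  }

private:
  ToyDepot Depot; // previously: ToyDepot *Depot plus a raw memory mapping
};

int main() {
  ToyAllocator A;
  std::printf("trace id: %u\n", static_cast<unsigned>(A.collectStackTrace()));
  std::printf("depot at %p, size %zu\n",
              static_cast<const void *>(A.getStackDepotAddress()),
              sizeof(ToyDepot));
  return 0;
}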