@@ -14,6 +14,7 @@
 #include "flags.h"
 #include "flags_parser.h"
 #include "local_cache.h"
+#include "mem_map.h"
 #include "memtag.h"
 #include "options.h"
 #include "quarantine.h"
@@ -935,8 +936,7 @@ class Allocator {
 
   uptr getRingBufferSize() {
     initThreadMaybe();
-    auto *RingBuffer = getRingBuffer();
-    return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
+    return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
   }
 
   static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
@@ -966,8 +966,9 @@ class Allocator {
   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                            uintptr_t FaultAddr, const char *DepotPtr,
                            const char *RegionInfoPtr, const char *RingBufferPtr,
-                           const char *Memory, const char *MemoryTags,
-                           uintptr_t MemoryAddr, size_t MemorySize) {
+                           size_t RingBufferSize, const char *Memory,
+                           const char *MemoryTags, uintptr_t MemoryAddr,
+                           size_t MemorySize) {
     *ErrorInfo = {};
     if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
@@ -986,7 +987,7 @@ class Allocator {
     // Check the ring buffer. For primary allocations this will only find UAF;
     // for secondary allocations we can find either UAF or OOB.
     getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
-                           RingBufferPtr);
+                           RingBufferPtr, RingBufferSize);
 
     // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
     // Beyond that we are likely to hit false positives.
@@ -1051,15 +1052,15 @@ class Allocator {
       atomic_u32 DeallocationTid;
     };
 
-    MemMapT MemMap;
     atomic_uptr Pos;
-    u32 Size;
     // An array of Size (at least one) elements of type Entry is immediately
     // following to this struct.
   };
   // Pointer to memory mapped area starting with AllocationRingBuffer struct,
   // and immediately followed by Size elements of type Entry.
   char *RawRingBuffer = {};
+  u32 RingBufferElements = 0;
+  MemMapT RawRingBufferMap;
 
   // The following might get optimized out by the compiler.
   NOINLINE void performSanityChecks() {
@@ -1267,7 +1268,7 @@ class Allocator {
                             u32 DeallocationTid) {
     uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
     typename AllocationRingBuffer::Entry *Entry =
-        getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
+        getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
 
     // First invalidate our entry so that we don't attempt to interpret a
     // partially written state in getSecondaryErrorInfo(). The fences below
@@ -1408,17 +1409,19 @@ class Allocator {
                                      size_t &NextErrorReport,
                                      uintptr_t FaultAddr,
                                      const StackDepot *Depot,
-                                     const char *RingBufferPtr) {
+                                     const char *RingBufferPtr,
+                                     size_t RingBufferSize) {
     auto *RingBuffer =
         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
-    if (!RingBuffer || RingBuffer->Size == 0)
+    size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
+    if (!RingBuffer || RingBufferElements == 0)
       return;
     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
 
-    for (uptr I = Pos - 1;
-         I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
+    for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
+                           NextErrorReport != NumErrorReports;
          --I) {
-      auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
+      auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
       uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
       if (!EntryPtr)
         continue;
@@ -1508,9 +1511,8 @@ class Allocator {
                     getPageSizeCached()),
         "scudo:ring_buffer");
     RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
-    auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
-    RingBuffer->MemMap = MemMap;
-    RingBuffer->Size = AllocationRingBufferSize;
+    RawRingBufferMap = MemMap;
+    RingBufferElements = AllocationRingBufferSize;
     static_assert(sizeof(AllocationRingBuffer) %
                           alignof(typename AllocationRingBuffer::Entry) ==
                       0,
@@ -1520,16 +1522,23 @@ class Allocator {
   void unmapRingBuffer() {
     auto *RingBuffer = getRingBuffer();
     if (RingBuffer != nullptr) {
-      MemMapT MemMap = RingBuffer->MemMap;
-      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+      RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
+                             RawRingBufferMap.getCapacity());
     }
     RawRingBuffer = nullptr;
   }
 
-  static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
+  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
     return sizeof(AllocationRingBuffer) +
-           AllocationRingBufferSize *
-               sizeof(typename AllocationRingBuffer::Entry);
+           RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
+  }
+
+  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
+    if (Bytes < sizeof(AllocationRingBuffer)) {
+      return 0;
+    }
+    return (Bytes - sizeof(AllocationRingBuffer)) /
+           sizeof(typename AllocationRingBuffer::Entry);
   }
 
   inline AllocationRingBuffer *getRingBuffer() {
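
Note on the size plumbing: with the `Size` field removed from the mapped `AllocationRingBuffer`, the crash handler now receives the ring buffer as raw bytes plus a byte count and recovers the element count via `ringBufferElementsFromBytes()`. The standalone sketch below uses simplified, hypothetical struct layouts (not the actual scudo types) to illustrate why the two conversions round-trip exactly and why a truncated buffer degrades to zero elements:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-ins for the mapped layout: a fixed-size header immediately followed
// by a flat array of fixed-size entries. The real scudo structs use atomics;
// only the sizes matter for this sketch.
struct Entry {
  uintptr_t Ptr;
  uintptr_t AllocationSize;
  uint32_t AllocationTrace, AllocationTid;
  uint32_t DeallocationTrace, DeallocationTid;
};

struct RingBufferHeader {
  uintptr_t Pos;
};

// Mirrors ringBufferSizeInBytes(): header plus N entries.
constexpr size_t ringBufferSizeInBytes(uint32_t Elements) {
  return sizeof(RingBufferHeader) + Elements * sizeof(Entry);
}

// Mirrors ringBufferElementsFromBytes(): the inverse mapping, returning 0 for
// a buffer too small to even hold the header.
constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
  if (Bytes < sizeof(RingBufferHeader))
    return 0;
  return (Bytes - sizeof(RingBufferHeader)) / sizeof(Entry);
}

int main() {
  // Round-tripping elements -> bytes -> elements is lossless for any count,
  // which is why the element count no longer needs to live inside the mapping.
  const uint32_t Counts[] = {0, 1, 16, 32768};
  for (uint32_t N : Counts)
    assert(ringBufferElementsFromBytes(ringBufferSizeInBytes(N)) == N);
  // A buffer too small for the header yields 0 elements, matching the
  // `RingBufferElements == 0` early return in getRingBufferErrorInfo().
  assert(ringBufferElementsFromBytes(sizeof(RingBufferHeader) - 1) == 0);
  return 0;
}
```

Because the division truncates, any trailing bytes that do not form a complete `Entry` are simply ignored by the element-count calculation.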