Skip to content

Reland "[scudo] resize stack depot for allocation ring buffer" #81028

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Feb 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
121 changes: 97 additions & 24 deletions compiler-rt/lib/scudo/standalone/combined.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "atomic_helpers.h"
#include "chunk.h"
#include "common.h"
#include "flags.h"
Expand Down Expand Up @@ -282,15 +283,15 @@ class Allocator {
return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
}

NOINLINE u32 collectStackTrace() {
NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
// Discard collectStackTrace() frame and allocator function frame.
constexpr uptr DiscardFrames = 2;
uptr Stack[MaxTraceSize + DiscardFrames];
uptr Size =
android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
#else
return 0;
#endif
Expand Down Expand Up @@ -687,12 +688,12 @@ class Allocator {
Quarantine.disable();
Primary.disable();
Secondary.disable();
Depot.disable();
Depot->disable();
}

void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
Depot.enable();
Depot->enable();
Secondary.enable();
Primary.enable();
Quarantine.enable();
Expand Down Expand Up @@ -915,8 +916,14 @@ class Allocator {
Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
}

const char *getStackDepotAddress() const {
return reinterpret_cast<const char *>(&Depot);
// Returns the base address of the memory-mapped StackDepot so an external
// consumer (e.g. the crash dumper feeding getErrorInfo()) can copy it out.
// Calls initThreadMaybe() first because the depot is created lazily at
// initialization; Depot may be null if depot setup was skipped, in which
// case callers receive nullptr.
const char *getStackDepotAddress() {
initThreadMaybe();
return reinterpret_cast<char *>(Depot);
}

// Returns the size in bytes of the StackDepot region (header plus ring and
// hash-table arrays), as recorded when the depot was sized and mapped; 0 if
// the depot was never allocated. Intended to be paired with
// getStackDepotAddress() so the whole depot can be copied out.
uptr getStackDepotSize() {
initThreadMaybe();
return StackDepotSize;
}

const char *getRegionInfoArrayAddress() const {
Expand Down Expand Up @@ -945,21 +952,35 @@ class Allocator {
if (!Depot->find(Hash, &RingPos, &Size))
return;
for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
}

static void getErrorInfo(struct scudo_error_info *ErrorInfo,
uintptr_t FaultAddr, const char *DepotPtr,
const char *RegionInfoPtr, const char *RingBufferPtr,
size_t RingBufferSize, const char *Memory,
const char *MemoryTags, uintptr_t MemoryAddr,
size_t MemorySize) {
size_t DepotSize, const char *RegionInfoPtr,
const char *RingBufferPtr, size_t RingBufferSize,
const char *Memory, const char *MemoryTags,
uintptr_t MemoryAddr, size_t MemorySize) {
// N.B. we need to support corrupted data in any of the buffers here. We get
// this information from an external process (the crashing process) that
// should not be able to crash the crash dumper (crash_dump on Android).
// See also the get_error_info_fuzzer.
*ErrorInfo = {};
if (!allocatorSupportsMemoryTagging<Config>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;

auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
const StackDepot *Depot = nullptr;
if (DepotPtr) {
// check for corrupted StackDepot. First we need to check whether we can
// read the metadata, then whether the metadata matches the size.
if (DepotSize < sizeof(*Depot))
return;
Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
if (!Depot->isValid(DepotSize))
return;
}

size_t NextErrorReport = 0;

// Check for OOB in the current block and the two surrounding blocks. Beyond
Expand Down Expand Up @@ -1025,7 +1046,9 @@ class Allocator {
uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS

StackDepot Depot;
StackDepot *Depot = nullptr;
uptr StackDepotSize = 0;
MemMapT RawStackDepotMap;

struct AllocationRingBuffer {
struct Entry {
Expand Down Expand Up @@ -1234,11 +1257,18 @@ class Allocator {
storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
}

void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
// Common guard for the store*StackMaybe() helpers: returns the stack depot
// when allocation-stack tracking is enabled (OptionBit::TrackAllocationStacks),
// or nullptr when tracking is off so callers can bail out early.
StackDepot *getDepotIfEnabled(const Options &Options) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return nullptr;
return Depot;
}

void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
auto *Depot = getDepotIfEnabled(Options);
if (!Depot)
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
Ptr32[MemTagAllocationTidIndex] = getThreadID();
}

Expand Down Expand Up @@ -1268,10 +1298,10 @@ class Allocator {

void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
auto *Depot = getDepotIfEnabled(Options);
if (!Depot)
return;

u32 Trace = collectStackTrace();
u32 Trace = collectStackTrace(Depot);
u32 Tid = getThreadID();

auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
Expand All @@ -1283,14 +1313,14 @@ class Allocator {

void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
u8 PrevTag, uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
auto *Depot = getDepotIfEnabled(Options);
if (!Depot)
return;

auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

u32 DeallocationTrace = collectStackTrace();
u32 DeallocationTrace = collectStackTrace(Depot);
u32 DeallocationTid = getThreadID();

storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
Expand Down Expand Up @@ -1369,8 +1399,10 @@ class Allocator {
UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
R->allocation_address = ChunkAddr;
R->allocation_size = Header.SizeOrUnusedBytes;
collectTraceMaybe(Depot, R->allocation_trace,
Data[MemTagAllocationTraceIndex]);
if (Depot) {
collectTraceMaybe(Depot, R->allocation_trace,
Data[MemTagAllocationTraceIndex]);
}
R->allocation_tid = Data[MemTagAllocationTidIndex];
return NextErrorReport == NumErrorReports;
};
Expand All @@ -1393,7 +1425,7 @@ class Allocator {
auto *RingBuffer =
reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
if (!RingBuffer || RingBufferElements == 0)
if (!RingBuffer || RingBufferElements == 0 || !Depot)
return;
uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

Expand Down Expand Up @@ -1483,6 +1515,43 @@ class Allocator {
return;
u32 AllocationRingBufferSize =
static_cast<u32>(getFlags()->allocation_ring_buffer_size);

// We store alloc and free stacks for each entry.
constexpr u32 kStacksPerRingBufferEntry = 2;
constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
static_assert(isPowerOfTwo(kMaxU32Pow2));
constexpr u32 kFramesPerStack = 8;
static_assert(isPowerOfTwo(kFramesPerStack));

// We need StackDepot to be aligned to 8-bytes so the ring we store after
// is correctly aligned.
static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);

// Make sure the maximum sized StackDepot fits within a uintptr_t to
// simplify the overflow checking.
static_assert(sizeof(StackDepot) + UINT32_MAX * sizeof(atomic_u64) *
UINT32_MAX * sizeof(atomic_u32) <
UINTPTR_MAX);

if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
return;
u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
AllocationRingBufferSize));
if (TabSize > UINT32_MAX / kFramesPerStack)
return;
u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
DCHECK(isPowerOfTwo(RingSize));

StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
sizeof(atomic_u32) * TabSize;
MemMapT DepotMap;
DepotMap.map(
/*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
"scudo:stack_depot");
Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
Depot->init(RingSize, TabSize);
RawStackDepotMap = DepotMap;

MemMapT MemMap;
MemMap.map(
/*Addr=*/0U,
Expand All @@ -1505,6 +1574,10 @@ class Allocator {
RawRingBufferMap.getCapacity());
}
RawRingBuffer = nullptr;
if (Depot) {
RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
RawStackDepotMap.getCapacity());
}
}

static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
Expand Down
14 changes: 5 additions & 9 deletions compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#define SCUDO_FUZZ
#include "allocator_config.h"
#include "combined.h"
#include "common.h"

#include <fuzzer/FuzzedDataProvider.h>

Expand All @@ -31,11 +32,6 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {

std::string StackDepotBytes =
FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
++i) {
StackDepot[i] = StackDepotBytes[i];
}

std::string RegionInfoBytes =
FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
Expand All @@ -48,9 +44,9 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();

scudo_error_info ErrorInfo;
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
RegionInfo.data(), RingBufferBytes.data(),
RingBufferBytes.size(), Memory, MemoryTags,
MemoryAddr, MemorySize);
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepotBytes.data(),
StackDepotBytes.size(), RegionInfo.data(),
RingBufferBytes.data(), RingBufferBytes.size(),
Memory, MemoryTags, MemoryAddr, MemorySize);
return 0;
}
10 changes: 0 additions & 10 deletions compiler-rt/lib/scudo/standalone/platform.h
Original file line number Diff line number Diff line change
Expand Up @@ -63,16 +63,6 @@
#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
#endif

// Use smaller table sizes for fuzzing in order to reduce input size.
// Trusty just has less available memory.
#ifndef SCUDO_SMALL_STACK_DEPOT
#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
#define SCUDO_SMALL_STACK_DEPOT 1
#else
#define SCUDO_SMALL_STACK_DEPOT 0
#endif
#endif

#ifndef SCUDO_ENABLE_HOOKS
#define SCUDO_ENABLE_HOOKS 0
#endif
Expand Down
Loading