
Reland "[scudo] resize stack depot for allocation ring buffer" #81028


Merged
merged 3 commits into llvm:main on Feb 17, 2024

Conversation

fmayer (Contributor) commented Feb 7, 2024

The first commit of the stack is a clean reland; the second is the fix.

There was a typo in the static_assert that meant we were asserting the size of the pointer, not the struct.

Also changed alignas to be more intuitive, but that is NFC.

Ran builds in Android here: https://r.android.com/2954411
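
To make the two fixes concrete, here is a minimal, hypothetical sketch of the problem being described (std::atomic&lt;std::uint64_t&gt; stands in for scudo's atomic_u64, and the member names only mirror the diff below; this is not the original code):

```cpp
#include <atomic>
#include <cstdint>

// Stand-in for the StackDepot header; in the real code it carries a mutex and
// hash-table bookkeeping and is followed in memory by the ring (atomic_u64)
// and the tab (atomic_u32) arrays. The alignas now names the element type of
// the trailing ring, which is the "more intuitive" spelling mentioned above.
struct alignas(std::atomic<std::uint64_t>) StackDepot {
  std::uintptr_t RingSize = 0, RingMask = 0, TabMask = 0;
};

struct Allocator {
  StackDepot *Depot = nullptr; // the depot is now a pointer into a mapped region

  void checkLayout() {
    // The typo: asserting on the member `Depot` (a pointer) checks the size of
    // the pointer, which is a multiple of 8 on 64-bit targets and so passes
    // while verifying nothing about the struct layout:
    //   static_assert(sizeof(Depot) % alignof(std::atomic<std::uint64_t>) == 0);
    // The fixed assert names the struct itself, so the atomic_u64 ring placed
    // right after the header is guaranteed to stay 8-byte aligned:
    static_assert(sizeof(StackDepot) % alignof(std::atomic<std::uint64_t>) == 0,
                  "ring following the depot header must be 8-byte aligned");
  }
};
```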

llvmbot (Member) commented Feb 7, 2024

@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Florian Mayer (fmayer)

Changes

The first commit of the stack is a clean reland; the second is the fix.

There was a typo in the static_assert that meant we were asserting the size of the pointer, not the struct.

Also changed alignas to be more intuitive, but that is NFC.

Ran builds in Android here: https://r.android.com/2954411


Full diff: https://github.com/llvm/llvm-project/pull/81028.diff

6 Files Affected:

  • (modified) compiler-rt/lib/scudo/standalone/combined.h (+97-24)
  • (modified) compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp (+5-9)
  • (modified) compiler-rt/lib/scudo/standalone/platform.h (-10)
  • (modified) compiler-rt/lib/scudo/standalone/stack_depot.h (+77-19)
  • (modified) compiler-rt/lib/scudo/standalone/tests/combined_test.cpp (+36-2)
  • (modified) compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp (+4-5)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 62473d14d1f90b..3e5ace599ce31d 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -9,6 +9,7 @@
 #ifndef SCUDO_COMBINED_H_
 #define SCUDO_COMBINED_H_
 
+#include "atomic_helpers.h"
 #include "chunk.h"
 #include "common.h"
 #include "flags.h"
@@ -282,7 +283,7 @@ class Allocator {
     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
   }
 
-  NOINLINE u32 collectStackTrace() {
+  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
     // Discard collectStackTrace() frame and allocator function frame.
     constexpr uptr DiscardFrames = 2;
@@ -290,7 +291,7 @@ class Allocator {
     uptr Size =
         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
-    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
 #else
     return 0;
 #endif
@@ -687,12 +688,12 @@ class Allocator {
     Quarantine.disable();
     Primary.disable();
     Secondary.disable();
-    Depot.disable();
+    Depot->disable();
   }
 
   void enable() NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
-    Depot.enable();
+    Depot->enable();
     Secondary.enable();
     Primary.enable();
     Quarantine.enable();
@@ -915,8 +916,14 @@ class Allocator {
       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
   }
 
-  const char *getStackDepotAddress() const {
-    return reinterpret_cast<const char *>(&Depot);
+  const char *getStackDepotAddress() {
+    initThreadMaybe();
+    return reinterpret_cast<char *>(Depot);
+  }
+
+  uptr getStackDepotSize() {
+    initThreadMaybe();
+    return StackDepotSize;
   }
 
   const char *getRegionInfoArrayAddress() const {
@@ -945,21 +952,35 @@ class Allocator {
     if (!Depot->find(Hash, &RingPos, &Size))
       return;
     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
-      Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
+      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
   }
 
   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                            uintptr_t FaultAddr, const char *DepotPtr,
-                           const char *RegionInfoPtr, const char *RingBufferPtr,
-                           size_t RingBufferSize, const char *Memory,
-                           const char *MemoryTags, uintptr_t MemoryAddr,
-                           size_t MemorySize) {
+                           size_t DepotSize, const char *RegionInfoPtr,
+                           const char *RingBufferPtr, size_t RingBufferSize,
+                           const char *Memory, const char *MemoryTags,
+                           uintptr_t MemoryAddr, size_t MemorySize) {
+    // N.B. we need to support corrupted data in any of the buffers here. We get
+    // this information from an external process (the crashing process) that
+    // should not be able to crash the crash dumper (crash_dump on Android).
+    // See also the get_error_info_fuzzer.
     *ErrorInfo = {};
     if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;
 
-    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+    const StackDepot *Depot = nullptr;
+    if (DepotPtr) {
+      // check for corrupted StackDepot. First we need to check whether we can
+      // read the metadata, then whether the metadata matches the size.
+      if (DepotSize < sizeof(*Depot))
+        return;
+      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+      if (!Depot->isValid(DepotSize))
+        return;
+    }
+
     size_t NextErrorReport = 0;
 
     // Check for OOB in the current block and the two surrounding blocks. Beyond
@@ -1025,7 +1046,9 @@ class Allocator {
   uptr GuardedAllocSlotSize = 0;
 #endif // GWP_ASAN_HOOKS
 
-  StackDepot Depot;
+  StackDepot *Depot = nullptr;
+  uptr StackDepotSize = 0;
+  MemMapT RawStackDepotMap;
 
   struct AllocationRingBuffer {
     struct Entry {
@@ -1234,11 +1257,18 @@ class Allocator {
     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }
 
-  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+  StackDepot *getDepotIfEnabled(const Options &Options) {
     if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+      return nullptr;
+    return Depot;
+  }
+
+  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
@@ -1268,10 +1298,10 @@ class Allocator {
 
   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                           uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
-
-    u32 Trace = collectStackTrace();
+    u32 Trace = collectStackTrace(Depot);
     u32 Tid = getThreadID();
 
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1283,14 +1313,14 @@ class Allocator {
 
   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                    u8 PrevTag, uptr Size) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+    auto *Depot = getDepotIfEnabled(Options);
+    if (!Depot)
       return;
-
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
 
-    u32 DeallocationTrace = collectStackTrace();
+    u32 DeallocationTrace = collectStackTrace(Depot);
     u32 DeallocationTid = getThreadID();
 
     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
@@ -1369,8 +1399,10 @@ class Allocator {
           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
       R->allocation_address = ChunkAddr;
       R->allocation_size = Header.SizeOrUnusedBytes;
-      collectTraceMaybe(Depot, R->allocation_trace,
-                        Data[MemTagAllocationTraceIndex]);
+      if (Depot) {
+        collectTraceMaybe(Depot, R->allocation_trace,
+                          Data[MemTagAllocationTraceIndex]);
+      }
       R->allocation_tid = Data[MemTagAllocationTidIndex];
       return NextErrorReport == NumErrorReports;
     };
@@ -1393,7 +1425,7 @@ class Allocator {
     auto *RingBuffer =
         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
-    if (!RingBuffer || RingBufferElements == 0)
+    if (!RingBuffer || RingBufferElements == 0 || !Depot)
       return;
     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
 
@@ -1483,6 +1515,43 @@ class Allocator {
       return;
     u32 AllocationRingBufferSize =
         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
+
+    // We store alloc and free stacks for each entry.
+    constexpr u32 kStacksPerRingBufferEntry = 2;
+    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
+    static_assert(isPowerOfTwo(kMaxU32Pow2));
+    constexpr u32 kFramesPerStack = 8;
+    static_assert(isPowerOfTwo(kFramesPerStack));
+
+    // We need StackDepot to be aligned to 8-bytes so the ring we store after
+    // is correctly assigned.
+    static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
+
+    // Make sure the maximum-sized StackDepot fits within a uintptr_t to
+    // simplify the overflow checking.
+    static_assert(sizeof(StackDepot) + UINT32_MAX * sizeof(atomic_u64) *
+                                           UINT32_MAX * sizeof(atomic_u32) <
+                  UINTPTR_MAX);
+
+    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
+      return;
+    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
+                                                     AllocationRingBufferSize));
+    if (TabSize > UINT32_MAX / kFramesPerStack)
+      return;
+    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
+    DCHECK(isPowerOfTwo(RingSize));
+
+    StackDepotSize = sizeof(Depot) + sizeof(atomic_u64) * RingSize +
+                     sizeof(atomic_u32) * TabSize;
+    MemMapT DepotMap;
+    DepotMap.map(
+        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
+        "scudo:stack_depot");
+    Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+    Depot->init(RingSize, TabSize);
+    RawStackDepotMap = DepotMap;
+
     MemMapT MemMap;
     MemMap.map(
         /*Addr=*/0U,
@@ -1505,6 +1574,10 @@ class Allocator {
                              RawRingBufferMap.getCapacity());
     }
     RawRingBuffer = nullptr;
+    if (Depot) {
+      RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
+                             RawStackDepotMap.getCapacity());
+    }
   }
 
   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 5b01ebe11c0952..2cef1c44fadc9b 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -9,6 +9,7 @@
 #define SCUDO_FUZZ
 #include "allocator_config.h"
 #include "combined.h"
+#include "common.h"
 
 #include <fuzzer/FuzzedDataProvider.h>
 
@@ -31,11 +32,6 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
 
   std::string StackDepotBytes =
       FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
-  std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
-  for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
-       ++i) {
-    StackDepot[i] = StackDepotBytes[i];
-  }
 
   std::string RegionInfoBytes =
       FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
@@ -48,9 +44,9 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
   std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
 
   scudo_error_info ErrorInfo;
-  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
-                           RegionInfo.data(), RingBufferBytes.data(),
-                           RingBufferBytes.size(), Memory, MemoryTags,
-                           MemoryAddr, MemorySize);
+  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepotBytes.data(),
+                           StackDepotBytes.size(), RegionInfo.data(),
+                           RingBufferBytes.data(), RingBufferBytes.size(),
+                           Memory, MemoryTags, MemoryAddr, MemorySize);
   return 0;
 }
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index b71a86be7669f3..5af1275e32d2b6 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -63,16 +63,6 @@
 #define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
 #endif
 
-// Use smaller table sizes for fuzzing in order to reduce input size.
-// Trusty just has less available memory.
-#ifndef SCUDO_SMALL_STACK_DEPOT
-#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
-#define SCUDO_SMALL_STACK_DEPOT 1
-#else
-#define SCUDO_SMALL_STACK_DEPOT 0
-#endif
-#endif
-
 #ifndef SCUDO_ENABLE_HOOKS
 #define SCUDO_ENABLE_HOOKS 0
 #endif
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index e887d1b43a7cf0..12a8488073b4a1 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -10,6 +10,7 @@
 #define SCUDO_STACK_DEPOT_H_
 
 #include "atomic_helpers.h"
+#include "common.h"
 #include "mutex.h"
 
 namespace scudo {
@@ -38,7 +39,7 @@ class MurMur2HashBuilder {
   }
 };
 
-class StackDepot {
+class alignas(atomic_u64) StackDepot {
   HybridMutex RingEndMu;
   u32 RingEnd = 0;
 
@@ -62,28 +63,81 @@ class StackDepot {
   // This is achieved by re-checking the hash of the stack trace before
   // returning the trace.
 
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr TabBits = 4;
-#else
-  static const uptr TabBits = 16;
-#endif
-  static const uptr TabSize = 1 << TabBits;
-  static const uptr TabMask = TabSize - 1;
-  atomic_u32 Tab[TabSize] = {};
-
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr RingBits = 4;
-#else
-  static const uptr RingBits = 19;
-#endif
-  static const uptr RingSize = 1 << RingBits;
-  static const uptr RingMask = RingSize - 1;
-  atomic_u64 Ring[RingSize] = {};
+  uptr RingSize = 0;
+  uptr RingMask = 0;
+  uptr TabMask = 0;
+  // This is immediately followed by RingSize atomic_u64 and
+  // (TabMask + 1) atomic_u32.
+
+  atomic_u64 *getRing() {
+    return reinterpret_cast<atomic_u64 *>(reinterpret_cast<char *>(this) +
+                                          sizeof(StackDepot));
+  }
+
+  atomic_u32 *getTab() {
+    return reinterpret_cast<atomic_u32 *>(reinterpret_cast<char *>(this) +
+                                          sizeof(StackDepot) +
+                                          sizeof(atomic_u64) * RingSize);
+  }
+
+  const atomic_u64 *getRing() const {
+    return reinterpret_cast<const atomic_u64 *>(
+        reinterpret_cast<const char *>(this) + sizeof(StackDepot));
+  }
+
+  const atomic_u32 *getTab() const {
+    return reinterpret_cast<const atomic_u32 *>(
+        reinterpret_cast<const char *>(this) + sizeof(StackDepot) +
+        sizeof(atomic_u64) * RingSize);
+  }
 
 public:
+  void init(uptr RingSz, uptr TabSz) {
+    DCHECK(isPowerOfTwo(RingSz));
+    DCHECK(isPowerOfTwo(TabSz));
+    RingSize = RingSz;
+    RingMask = RingSz - 1;
+    TabMask = TabSz - 1;
+  }
+
+  // Ensure that RingSize, RingMask and TabMask are set up in a way that
+  // all accesses are within range of BufSize.
+  bool isValid(uptr BufSize) const {
+    if (RingSize > UINTPTR_MAX / sizeof(atomic_u64))
+      return false;
+    if (RingSize == 0 || !isPowerOfTwo(RingSize))
+      return false;
+    uptr RingBytes = sizeof(atomic_u64) * RingSize;
+    if (RingMask + 1 != RingSize)
+      return false;
+
+    if (TabMask == 0)
+      return false;
+    if ((TabMask - 1) > UINTPTR_MAX / sizeof(atomic_u32))
+      return false;
+    uptr TabSize = TabMask + 1;
+    if (!isPowerOfTwo(TabSize))
+      return false;
+    uptr TabBytes = sizeof(atomic_u32) * TabSize;
+
+    // Subtract and detect underflow.
+    if (BufSize < sizeof(StackDepot))
+      return false;
+    BufSize -= sizeof(StackDepot);
+    if (BufSize < TabBytes)
+      return false;
+    BufSize -= TabBytes;
+    if (BufSize < RingBytes)
+      return false;
+    return BufSize == RingBytes;
+  }
+
   // Insert hash of the stack trace [Begin, End) into the stack depot, and
   // return the hash.
   u32 insert(uptr *Begin, uptr *End) {
+    auto *Tab = getTab();
+    auto *Ring = getRing();
+
     MurMur2HashBuilder B;
     for (uptr *I = Begin; I != End; ++I)
       B.add(u32(*I) >> 2);
@@ -112,6 +166,9 @@ class StackDepot {
   // accessed via operator[] passing indexes between *RingPosPtr and
   // *RingPosPtr + *SizePtr.
   bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
+    auto *Tab = getTab();
+    auto *Ring = getRing();
+
     u32 Pos = Hash & TabMask;
     u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
     if (RingPos >= RingSize)
@@ -133,7 +190,8 @@ class StackDepot {
     return B.get() == Hash;
   }
 
-  u64 operator[](uptr RingPos) const {
+  u64 at(uptr RingPos) const {
+    auto *Ring = getRing();
     return atomic_load_relaxed(&Ring[RingPos & RingMask]);
   }
 
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 13f5ac132837b3..3785e2261d84e3 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "memtag.h"
+#include "stack_depot.h"
 #include "tests/scudo_unit_test.h"
 
 #include "allocator_config.h"
@@ -870,8 +871,8 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
 SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
   auto *Allocator = this->Allocator.get();
   auto Size = Allocator->getRingBufferSize();
-  if (Size > 0)
-    EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
+  ASSERT_GT(Size, 0);
+  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
 }
 
 SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
@@ -881,6 +882,39 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
   EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
 }
 
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
+  auto *Allocator = this->Allocator.get();
+  auto Size = Allocator->getStackDepotSize();
+  ASSERT_GT(Size, 0);
+  EXPECT_EQ(Allocator->getStackDepotAddress()[Size - 1], '\0');
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
+  auto *Allocator = this->Allocator.get();
+  auto *Addr = Allocator->getStackDepotAddress();
+  EXPECT_NE(Addr, nullptr);
+  EXPECT_EQ(Addr, Allocator->getStackDepotAddress());
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
+  alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
+                                      1024 * sizeof(scudo::atomic_u64) +
+                                      1024 * sizeof(scudo::atomic_u32)] = {};
+  auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
+  Depot->init(1024, 1024);
+  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
+  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
+  scudo::uptr Stack[] = {1, 2, 3};
+  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
+  scudo::uptr RingPosPtr = 0;
+  scudo::uptr SizePtr = 0;
+  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
+  ASSERT_EQ(SizePtr, 3);
+  EXPECT_EQ(Depot->at(RingPosPtr), 1);
+  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2);
+  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3);
+}
+
 #if SCUDO_CAN_USE_PRIMARY64
 #if SCUDO_TRUSTY
 
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 21694c3f17fe57..e9d8c1e8d3db2f 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -43,10 +43,9 @@ INTERFACE void __scudo_get_error_info(
     const char *stack_depot, size_t stack_depot_size, const char *region_info,
     const char *ring_buffer, size_t ring_buffer_size, const char *memory,
     const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
-  (void)(stack_depot_size);
-  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
-                         ring_buffer, ring_buffer_size, memory, memory_tags,
-                         memory_addr, memory_size);
+  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
+                         region_info, ring_buffer, ring_buffer_size, memory,
+                         memory_tags, memory_addr, memory_size);
 }
 
 INTERFACE const char *__scudo_get_stack_depot_addr() {
@@ -54,7 +53,7 @@ INTERFACE const char *__scudo_get_stack_depot_addr() {
 }
 
 INTERFACE size_t __scudo_get_stack_depot_size() {
-  return sizeof(scudo::StackDepot);
+  return Allocator.getStackDepotSize();
 }
 
 INTERFACE const char *__scudo_get_region_info_addr() {
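
For readers skimming the diff: the new code in combined.h derives the depot geometry from allocation_ring_buffer_size. Each ring-buffer entry stores two stacks (allocation and deallocation), each stack gets eight frame slots, the hash table is rounded up to a power of two, and the ring and tab arrays are appended directly after the depot header. A standalone sketch of that arithmetic, with simplified stand-ins for scudo's helpers and HeaderSize standing in for sizeof(StackDepot):

```cpp
#include <cassert>
#include <cstdint>

// Simplified stand-ins for scudo's isPowerOfTwo/roundUpPowerOfTwo helpers.
constexpr bool isPowerOfTwo(std::uint64_t X) { return X && !(X & (X - 1)); }
constexpr std::uint64_t roundUpPowerOfTwo(std::uint64_t X) {
  std::uint64_t P = 1;
  while (P < X)
    P <<= 1;
  return P;
}

// Mirrors the sizing logic added above: the tab gets one slot per stored
// stack, rounded up to a power of two; the ring gets eight 64-bit frame slots
// per tab slot; both arrays live directly after the depot header.
std::uint64_t stackDepotSize(std::uint32_t AllocationRingBufferSize,
                             std::uint64_t HeaderSize) {
  constexpr std::uint32_t kStacksPerRingBufferEntry = 2; // alloc + free stack
  constexpr std::uint32_t kFramesPerStack = 8;
  std::uint64_t TabSize = roundUpPowerOfTwo(
      std::uint64_t{kStacksPerRingBufferEntry} * AllocationRingBufferSize);
  std::uint64_t RingSize = TabSize * kFramesPerStack;
  assert(isPowerOfTwo(RingSize));
  return HeaderSize + sizeof(std::uint64_t) * RingSize + // atomic_u64 ring
         sizeof(std::uint32_t) * TabSize;                // atomic_u32 tab
}
```

For example, an allocation_ring_buffer_size of 32768 would give a 65536-entry tab and a 524288-entry ring, roughly 4.25 MiB plus the header, before the mapping is rounded up to page size.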

fmayer merged commit aff6cb4 into llvm:main on Feb 17, 2024
fmayer (Contributor, Author) commented Feb 17, 2024

I will fix forward the compile error around u32 on https://lab.llvm.org/buildbot/#/builders/165/builds/48851

fmayer added a commit that referenced this pull request Feb 17, 2024
jrguzman-ms pushed a commit to msft-mirror-aosp/platform.external.scudo that referenced this pull request Feb 21, 2024
…"" (#82088)

Reverts llvm/llvm-project#81028

GitOrigin-RevId: e1164d063558b1e89f20109d83c079caae1825d8
Change-Id: I64c1e79689191f3f126098d4a5d0d3b653304895