Skip to content

Revert "Reland "[scudo] resize stack depot for allocation ring buffer"" #82088

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Feb 17, 2024

Conversation

fmayer
Copy link
Contributor

@fmayer fmayer commented Feb 17, 2024

Reverts #81028

@fmayer fmayer merged commit e1164d0 into main Feb 17, 2024
@fmayer fmayer deleted the revert-81028-initdepot branch February 17, 2024 01:58
@llvmbot
Copy link
Member

llvmbot commented Feb 17, 2024

@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Florian Mayer (fmayer)

Changes

Reverts llvm/llvm-project#81028


Full diff: https://github.com/llvm/llvm-project/pull/82088.diff

6 Files Affected:

  • (modified) compiler-rt/lib/scudo/standalone/combined.h (+24-97)
  • (modified) compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp (+9-5)
  • (modified) compiler-rt/lib/scudo/standalone/platform.h (+10)
  • (modified) compiler-rt/lib/scudo/standalone/stack_depot.h (+19-77)
  • (modified) compiler-rt/lib/scudo/standalone/tests/combined_test.cpp (+2-36)
  • (modified) compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp (+5-4)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 080ba42ad44497..62473d14d1f90b 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -9,7 +9,6 @@
 #ifndef SCUDO_COMBINED_H_
 #define SCUDO_COMBINED_H_
 
-#include "atomic_helpers.h"
 #include "chunk.h"
 #include "common.h"
 #include "flags.h"
@@ -283,7 +282,7 @@ class Allocator {
     return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
   }
 
-  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
+  NOINLINE u32 collectStackTrace() {
 #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
     // Discard collectStackTrace() frame and allocator function frame.
     constexpr uptr DiscardFrames = 2;
@@ -291,7 +290,7 @@ class Allocator {
     uptr Size =
         android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
     Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
-    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+    return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
 #else
     return 0;
 #endif
@@ -688,12 +687,12 @@ class Allocator {
     Quarantine.disable();
     Primary.disable();
     Secondary.disable();
-    Depot->disable();
+    Depot.disable();
   }
 
   void enable() NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
-    Depot->enable();
+    Depot.enable();
     Secondary.enable();
     Primary.enable();
     Quarantine.enable();
@@ -916,14 +915,8 @@ class Allocator {
       Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
   }
 
-  const char *getStackDepotAddress() {
-    initThreadMaybe();
-    return reinterpret_cast<char *>(Depot);
-  }
-
-  uptr getStackDepotSize() {
-    initThreadMaybe();
-    return StackDepotSize;
+  const char *getStackDepotAddress() const {
+    return reinterpret_cast<const char *>(&Depot);
   }
 
   const char *getRegionInfoArrayAddress() const {
@@ -952,35 +945,21 @@ class Allocator {
     if (!Depot->find(Hash, &RingPos, &Size))
       return;
     for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
-      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
+      Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
   }
 
   static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                            uintptr_t FaultAddr, const char *DepotPtr,
-                           size_t DepotSize, const char *RegionInfoPtr,
-                           const char *RingBufferPtr, size_t RingBufferSize,
-                           const char *Memory, const char *MemoryTags,
-                           uintptr_t MemoryAddr, size_t MemorySize) {
-    // N.B. we need to support corrupted data in any of the buffers here. We get
-    // this information from an external process (the crashing process) that
-    // should not be able to crash the crash dumper (crash_dump on Android).
-    // See also the get_error_info_fuzzer.
+                           const char *RegionInfoPtr, const char *RingBufferPtr,
+                           size_t RingBufferSize, const char *Memory,
+                           const char *MemoryTags, uintptr_t MemoryAddr,
+                           size_t MemorySize) {
     *ErrorInfo = {};
     if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;
 
-    const StackDepot *Depot = nullptr;
-    if (DepotPtr) {
-      // check for corrupted StackDepot. First we need to check whether we can
-      // read the metadata, then whether the metadata matches the size.
-      if (DepotSize < sizeof(*Depot))
-        return;
-      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
-      if (!Depot->isValid(DepotSize))
-        return;
-    }
-
+    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
     size_t NextErrorReport = 0;
 
     // Check for OOB in the current block and the two surrounding blocks. Beyond
@@ -1046,9 +1025,7 @@ class Allocator {
   uptr GuardedAllocSlotSize = 0;
 #endif // GWP_ASAN_HOOKS
 
-  StackDepot *Depot = nullptr;
-  uptr StackDepotSize = 0;
-  MemMapT RawStackDepotMap;
+  StackDepot Depot;
 
   struct AllocationRingBuffer {
     struct Entry {
@@ -1257,18 +1234,11 @@ class Allocator {
     storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
   }
 
-  StackDepot *getDepotIfEnabled(const Options &Options) {
-    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
-      return nullptr;
-    return Depot;
-  }
-
   void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
-    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(Depot);
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
@@ -1298,10 +1268,10 @@ class Allocator {
 
   void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                           uptr Size) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
-    u32 Trace = collectStackTrace(Depot);
+
+    u32 Trace = collectStackTrace();
     u32 Tid = getThreadID();
 
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1313,14 +1283,14 @@ class Allocator {
 
   void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                    u8 PrevTag, uptr Size) {
-    auto *Depot = getDepotIfEnabled(Options);
-    if (!Depot)
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
+
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
     u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
 
-    u32 DeallocationTrace = collectStackTrace(Depot);
+    u32 DeallocationTrace = collectStackTrace();
     u32 DeallocationTid = getThreadID();
 
     storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
@@ -1399,10 +1369,8 @@ class Allocator {
           UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
       R->allocation_address = ChunkAddr;
       R->allocation_size = Header.SizeOrUnusedBytes;
-      if (Depot) {
-        collectTraceMaybe(Depot, R->allocation_trace,
-                          Data[MemTagAllocationTraceIndex]);
-      }
+      collectTraceMaybe(Depot, R->allocation_trace,
+                        Data[MemTagAllocationTraceIndex]);
       R->allocation_tid = Data[MemTagAllocationTidIndex];
       return NextErrorReport == NumErrorReports;
     };
@@ -1425,7 +1393,7 @@ class Allocator {
     auto *RingBuffer =
         reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
     size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
-    if (!RingBuffer || RingBufferElements == 0 || !Depot)
+    if (!RingBuffer || RingBufferElements == 0)
       return;
     uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
 
@@ -1515,43 +1483,6 @@ class Allocator {
       return;
     u32 AllocationRingBufferSize =
         static_cast<u32>(getFlags()->allocation_ring_buffer_size);
-
-    // We store alloc and free stacks for each entry.
-    constexpr u32 kStacksPerRingBufferEntry = 2;
-    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
-    static_assert(isPowerOfTwo(kMaxU32Pow2));
-    constexpr u32 kFramesPerStack = 8;
-    static_assert(isPowerOfTwo(kFramesPerStack));
-
-    // We need StackDepot to be aligned to 8-bytes so the ring we store after
-    // is correctly assigned.
-    static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
-
-    // Make sure the maximum sized StackDepot fits withint a uintptr_t to
-    // simplify the overflow checking.
-    static_assert(sizeof(StackDepot) + UINT32_MAX * sizeof(atomic_u64) *
-                                           UINT32_MAX * sizeof(atomic_u32) <
-                  UINTPTR_MAX);
-
-    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
-      return;
-    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
-                                                     AllocationRingBufferSize));
-    if (TabSize > UINT32_MAX / kFramesPerStack)
-      return;
-    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
-    DCHECK(isPowerOfTwo(RingSize));
-
-    StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
-                     sizeof(atomic_u32) * TabSize;
-    MemMapT DepotMap;
-    DepotMap.map(
-        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
-        "scudo:stack_depot");
-    Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
-    Depot->init(RingSize, TabSize);
-    RawStackDepotMap = DepotMap;
-
     MemMapT MemMap;
     MemMap.map(
         /*Addr=*/0U,
@@ -1574,10 +1505,6 @@ class Allocator {
                              RawRingBufferMap.getCapacity());
     }
     RawRingBuffer = nullptr;
-    if (Depot) {
-      RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
-                             RawStackDepotMap.getCapacity());
-    }
   }
 
   static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 2cef1c44fadc9b..5b01ebe11c0952 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -9,7 +9,6 @@
 #define SCUDO_FUZZ
 #include "allocator_config.h"
 #include "combined.h"
-#include "common.h"
 
 #include <fuzzer/FuzzedDataProvider.h>
 
@@ -32,6 +31,11 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
 
   std::string StackDepotBytes =
       FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
+  std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
+  for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
+       ++i) {
+    StackDepot[i] = StackDepotBytes[i];
+  }
 
   std::string RegionInfoBytes =
       FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
@@ -44,9 +48,9 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
   std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
 
   scudo_error_info ErrorInfo;
-  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepotBytes.data(),
-                           StackDepotBytes.size(), RegionInfo.data(),
-                           RingBufferBytes.data(), RingBufferBytes.size(),
-                           Memory, MemoryTags, MemoryAddr, MemorySize);
+  AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
+                           RegionInfo.data(), RingBufferBytes.data(),
+                           RingBufferBytes.size(), Memory, MemoryTags,
+                           MemoryAddr, MemorySize);
   return 0;
 }
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index 5af1275e32d2b6..b71a86be7669f3 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -63,6 +63,16 @@
 #define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
 #endif
 
+// Use smaller table sizes for fuzzing in order to reduce input size.
+// Trusty just has less available memory.
+#ifndef SCUDO_SMALL_STACK_DEPOT
+#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
+#define SCUDO_SMALL_STACK_DEPOT 1
+#else
+#define SCUDO_SMALL_STACK_DEPOT 0
+#endif
+#endif
+
 #ifndef SCUDO_ENABLE_HOOKS
 #define SCUDO_ENABLE_HOOKS 0
 #endif
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index 12a8488073b4a1..e887d1b43a7cf0 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -10,7 +10,6 @@
 #define SCUDO_STACK_DEPOT_H_
 
 #include "atomic_helpers.h"
-#include "common.h"
 #include "mutex.h"
 
 namespace scudo {
@@ -39,7 +38,7 @@ class MurMur2HashBuilder {
   }
 };
 
-class alignas(atomic_u64) StackDepot {
+class StackDepot {
   HybridMutex RingEndMu;
   u32 RingEnd = 0;
 
@@ -63,81 +62,28 @@ class alignas(atomic_u64) StackDepot {
   // This is achieved by re-checking the hash of the stack trace before
   // returning the trace.
 
-  uptr RingSize = 0;
-  uptr RingMask = 0;
-  uptr TabMask = 0;
-  // This is immediately followed by RingSize atomic_u64 and
-  // (TabMask + 1) atomic_u32.
-
-  atomic_u64 *getRing() {
-    return reinterpret_cast<atomic_u64 *>(reinterpret_cast<char *>(this) +
-                                          sizeof(StackDepot));
-  }
-
-  atomic_u32 *getTab() {
-    return reinterpret_cast<atomic_u32 *>(reinterpret_cast<char *>(this) +
-                                          sizeof(StackDepot) +
-                                          sizeof(atomic_u64) * RingSize);
-  }
-
-  const atomic_u64 *getRing() const {
-    return reinterpret_cast<const atomic_u64 *>(
-        reinterpret_cast<const char *>(this) + sizeof(StackDepot));
-  }
-
-  const atomic_u32 *getTab() const {
-    return reinterpret_cast<const atomic_u32 *>(
-        reinterpret_cast<const char *>(this) + sizeof(StackDepot) +
-        sizeof(atomic_u64) * RingSize);
-  }
+#if SCUDO_SMALL_STACK_DEPOT
+  static const uptr TabBits = 4;
+#else
+  static const uptr TabBits = 16;
+#endif
+  static const uptr TabSize = 1 << TabBits;
+  static const uptr TabMask = TabSize - 1;
+  atomic_u32 Tab[TabSize] = {};
+
+#if SCUDO_SMALL_STACK_DEPOT
+  static const uptr RingBits = 4;
+#else
+  static const uptr RingBits = 19;
+#endif
+  static const uptr RingSize = 1 << RingBits;
+  static const uptr RingMask = RingSize - 1;
+  atomic_u64 Ring[RingSize] = {};
 
 public:
-  void init(uptr RingSz, uptr TabSz) {
-    DCHECK(isPowerOfTwo(RingSz));
-    DCHECK(isPowerOfTwo(TabSz));
-    RingSize = RingSz;
-    RingMask = RingSz - 1;
-    TabMask = TabSz - 1;
-  }
-
-  // Ensure that RingSize, RingMask and TabMask are set up in a way that
-  // all accesses are within range of BufSize.
-  bool isValid(uptr BufSize) const {
-    if (RingSize > UINTPTR_MAX / sizeof(atomic_u64))
-      return false;
-    if (RingSize == 0 || !isPowerOfTwo(RingSize))
-      return false;
-    uptr RingBytes = sizeof(atomic_u64) * RingSize;
-    if (RingMask + 1 != RingSize)
-      return false;
-
-    if (TabMask == 0)
-      return false;
-    if ((TabMask - 1) > UINTPTR_MAX / sizeof(atomic_u32))
-      return false;
-    uptr TabSize = TabMask + 1;
-    if (!isPowerOfTwo(TabSize))
-      return false;
-    uptr TabBytes = sizeof(atomic_u32) * TabSize;
-
-    // Subtract and detect underflow.
-    if (BufSize < sizeof(StackDepot))
-      return false;
-    BufSize -= sizeof(StackDepot);
-    if (BufSize < TabBytes)
-      return false;
-    BufSize -= TabBytes;
-    if (BufSize < RingBytes)
-      return false;
-    return BufSize == RingBytes;
-  }
-
   // Insert hash of the stack trace [Begin, End) into the stack depot, and
   // return the hash.
   u32 insert(uptr *Begin, uptr *End) {
-    auto *Tab = getTab();
-    auto *Ring = getRing();
-
     MurMur2HashBuilder B;
     for (uptr *I = Begin; I != End; ++I)
       B.add(u32(*I) >> 2);
@@ -166,9 +112,6 @@ class alignas(atomic_u64) StackDepot {
   // accessed via operator[] passing indexes between *RingPosPtr and
   // *RingPosPtr + *SizePtr.
   bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
-    auto *Tab = getTab();
-    auto *Ring = getRing();
-
     u32 Pos = Hash & TabMask;
     u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
     if (RingPos >= RingSize)
@@ -190,8 +133,7 @@ class alignas(atomic_u64) StackDepot {
     return B.get() == Hash;
   }
 
-  u64 at(uptr RingPos) const {
-    auto *Ring = getRing();
+  u64 operator[](uptr RingPos) const {
     return atomic_load_relaxed(&Ring[RingPos & RingMask]);
   }
 
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 3785e2261d84e3..13f5ac132837b3 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -7,7 +7,6 @@
 //===----------------------------------------------------------------------===//
 
 #include "memtag.h"
-#include "stack_depot.h"
 #include "tests/scudo_unit_test.h"
 
 #include "allocator_config.h"
@@ -871,8 +870,8 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
 SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
   auto *Allocator = this->Allocator.get();
   auto Size = Allocator->getRingBufferSize();
-  ASSERT_GT(Size, 0);
-  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
+  if (Size > 0)
+    EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
 }
 
 SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
@@ -882,39 +881,6 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
   EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
 }
 
-SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
-  auto *Allocator = this->Allocator.get();
-  auto Size = Allocator->getStackDepotSize();
-  ASSERT_GT(Size, 0);
-  EXPECT_EQ(Allocator->getStackDepotAddress()[Size - 1], '\0');
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
-  auto *Allocator = this->Allocator.get();
-  auto *Addr = Allocator->getStackDepotAddress();
-  EXPECT_NE(Addr, nullptr);
-  EXPECT_EQ(Addr, Allocator->getStackDepotAddress());
-}
-
-SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
-  alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
-                                      1024 * sizeof(scudo::atomic_u64) +
-                                      1024 * sizeof(scudo::atomic_u32)] = {};
-  auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
-  Depot->init(1024, 1024);
-  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
-  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
-  scudo::uptr Stack[] = {1, 2, 3};
-  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
-  scudo::uptr RingPosPtr = 0;
-  scudo::uptr SizePtr = 0;
-  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
-  ASSERT_EQ(SizePtr, 3);
-  EXPECT_EQ(Depot->at(RingPosPtr), 1);
-  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2);
-  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3);
-}
-
 #if SCUDO_CAN_USE_PRIMARY64
 #if SCUDO_TRUSTY
 
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index e9d8c1e8d3db2f..21694c3f17fe57 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -43,9 +43,10 @@ INTERFACE void __scudo_get_error_info(
     const char *stack_depot, size_t stack_depot_size, const char *region_info,
     const char *ring_buffer, size_t ring_buffer_size, const char *memory,
     const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
-  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
-                         region_info, ring_buffer, ring_buffer_size, memory,
-                         memory_tags, memory_addr, memory_size);
+  (void)(stack_depot_size);
+  Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
+                         ring_buffer, ring_buffer_size, memory, memory_tags,
+                         memory_addr, memory_size);
 }
 
 INTERFACE const char *__scudo_get_stack_depot_addr() {
@@ -53,7 +54,7 @@ INTERFACE const char *__scudo_get_stack_depot_addr() {
 }
 
 INTERFACE size_t __scudo_get_stack_depot_size() {
-  return Allocator.getStackDepotSize();
+  return sizeof(scudo::StackDepot);
 }
 
 INTERFACE const char *__scudo_get_region_info_addr() {

jrguzman-ms pushed a commit to msft-mirror-aosp/platform.external.scudo that referenced this pull request Mar 3, 2024
This reverts commit ef3ce98.

Reason for revert: llvm/llvm-project#82088

Change-Id: I7b4df93fbd985227be82dc85aa17b07d1165aab2
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Projects
None yet
Development

Successfully merging this pull request may close these issues.

2 participants