Skip to content

Commit 01bf29b

Browse files
authored
[sanitizers] Optimize locking StackDepotBase for fork (#76280)
Locking StackDepotBase fully is very expensive, as 2^20 buckets need to be locked. Not locking, but only unlocking the buckets that need to be unlocked to avoid deadlocks, increases the chance of a data race: a value with the same hash can be inserted into the table twice, and one copy is lost. However, this costs only a small amount of additional memory in the forked process.
1 parent 4358e6e commit 01bf29b

File tree

2 files changed

+22
-4
lines changed

2 files changed

+22
-4
lines changed

compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,10 @@ class TwoLevelMap {
109109
return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
110110
}
111111

112+
void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Lock(); }
113+
114+
void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Unlock(); }
115+
112116
private:
113117
constexpr uptr MmapSize() const {
114118
return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());

compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -161,18 +161,32 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
161161

162162
template <class Node, int kReservedBits, int kTabSizeLog>
163163
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockBeforeFork() {
164-
for (int i = 0; i < kTabSize; ++i) {
165-
lock(&tab[i]);
166-
}
164+
// Do not lock hash table. It's very expensive, but it's not rely needed. The
165+
// parent process will neither lock nor unlock. Child process risks to be
166+
// deadlocked on already locked buckets. To avoid deadlock we will unlock
167+
// every locked buckets in `UnlockAfterFork`. This may affect consistency of
168+
// the hash table, but the only issue is a few items inserted by parent
169+
// process will be not found by child, and the child may insert them again,
170+
// wasting some space in `stackStore`.
171+
172+
// We still need to lock nodes.
173+
nodes.Lock();
167174
}
168175

169176
template <class Node, int kReservedBits, int kTabSizeLog>
170177
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAfterFork(
171178
bool fork_child) {
179+
nodes.Unlock();
180+
181+
// Only unlock in child process to avoid deadlock. See `LockBeforeFork`.
182+
if (!fork_child)
183+
return;
184+
172185
for (int i = 0; i < kTabSize; ++i) {
173186
atomic_uint32_t *p = &tab[i];
174187
uptr s = atomic_load(p, memory_order_relaxed);
175-
unlock(p, s & kUnlockMask);
188+
if (s & kLockMask)
189+
unlock(p, s & kUnlockMask);
176190
}
177191
}
178192

0 commit comments

Comments
 (0)