Skip to content

Commit 0224399

Browse files
committed
sanitizer_common: add deadlock detection to the Mutex2
Copy the internal deadlock detector from tsan to sanitizer_common (with some cosmetic changes). The tsan version will be deleted in subsequent changes. This allows us to switch tsan to the sanitizer_common mutex and to remove tsan's own mutex.

Reviewed By: vitalybuka, melver
Differential Revision: https://reviews.llvm.org/D106546
1 parent fde7550 commit 0224399

File tree

2 files changed

+254
-1
lines changed

2 files changed

+254
-1
lines changed

compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp

Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,4 +48,178 @@ void Semaphore::Post(u32 count) {
4848
FutexWake(&state_, count);
4949
}
5050

51+
#if SANITIZER_CHECK_DEADLOCKS
// An empty mutex meta table; it effectively disables deadlock detection.
// Each tool can override the table (it is a weak symbol) to define its own
// mutex hierarchy and enable deadlock detection.
// The table defines a static mutex type hierarchy (what mutex types can be
// locked under what mutex types). This table is checked to be acyclic and then
// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
// The checking happens on mutex types rather than on individual mutex instances
// because doing it on mutex instances would both significantly complicate
// the implementation, worsen performance and memory overhead, and is mostly
// unnecessary (we almost never lock multiple mutexes of the same type
// recursively).
static constexpr int kMutexTypeMax = 20;
SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
// Weak hook for tools to symbolize/print the lock-site PC in reports.
SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
// Protects lazy one-time initialization in DebugMutexInit().
static StaticSpinMutex mutex_meta_mtx;
// Number of types found in mutex_meta; -1 until DebugMutexInit() has run.
static int mutex_type_count = -1;
// Adjacency matrix of what mutexes can be locked under what mutexes.
static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
// Mutex types with MutexMulti mark.
static bool mutex_multi[kMutexTypeMax];
// Builds the mutex_can_lock adjacency matrix from the (tool-overridable)
// mutex_meta table, expands MutexLeaf marks, and verifies the resulting
// lock-order graph is acyclic. Called once, lazily, under mutex_meta_mtx
// from InternalDeadlockDetector::Initialize().
void DebugMutexInit() {
  // Build adjacency matrix.
  bool leaf[kMutexTypeMax];
  internal_memset(&leaf, 0, sizeof(leaf));
  // Number of explicit can_lock successors per type (leaf types must have 0).
  int cnt[kMutexTypeMax] = {};
  internal_memset(&cnt, 0, sizeof(cnt));
  for (int t = 0; t < kMutexTypeMax; t++) {
    mutex_type_count = t;
    if (!mutex_meta[t].name)
      break;  // Unnamed entry terminates the table.
    // Table entries must be listed in order of their type value.
    CHECK_EQ(t, mutex_meta[t].type);
    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
      MutexType z = mutex_meta[t].can_lock[j];
      if (z == MutexInvalid)
        break;  // The rest of can_lock[] is empty.
      if (z == MutexLeaf) {
        CHECK(!leaf[t]);  // Duplicate MutexLeaf mark.
        leaf[t] = true;
        continue;
      }
      if (z == MutexMulti) {
        mutex_multi[t] = true;
        continue;
      }
      CHECK_LT(z, kMutexTypeMax);
      CHECK(!mutex_can_lock[t][z]);  // Duplicate can_lock entry.
      mutex_can_lock[t][z] = true;
      cnt[t]++;
    }
  }
  // Indicates the array is not properly terminated.
  CHECK_LT(mutex_type_count, kMutexTypeMax);
  // Add leaf mutexes: a leaf can be locked under any non-leaf type,
  // but must not declare explicit successors of its own.
  for (int t = 0; t < mutex_type_count; t++) {
    if (!leaf[t])
      continue;
    CHECK_EQ(cnt[t], 0);
    for (int z = 0; z < mutex_type_count; z++) {
      if (z == MutexInvalid || t == z || leaf[z])
        continue;
      CHECK(!mutex_can_lock[z][t]);
      mutex_can_lock[z][t] = true;
    }
  }
  // Build the transitive closure and check that the graph is acyclic.
  // trans[i] is a bitmask of all types reachable from type i.
  u32 trans[kMutexTypeMax];
  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
                "kMutexTypeMax does not fit into u32, switch to u64");
  internal_memset(&trans, 0, sizeof(trans));
  for (int i = 0; i < mutex_type_count; i++) {
    for (int j = 0; j < mutex_type_count; j++)
      if (mutex_can_lock[i][j])
        trans[i] |= 1 << j;
  }
  // Floyd-Warshall-style closure over the reachability bitmasks.
  for (int k = 0; k < mutex_type_count; k++) {
    for (int i = 0; i < mutex_type_count; i++) {
      if (trans[i] & (1 << k))
        trans[i] |= trans[k];
    }
  }
  // A type reachable from itself means the declared hierarchy has a cycle.
  for (int i = 0; i < mutex_type_count; i++) {
    if (trans[i] & (1 << i)) {
      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
      Die();
    }
  }
}
// Per-thread state tracking which checked mutex types the current thread
// holds. Lock-order violations against the static mutex_can_lock relation
// are reported eagerly at the offending Lock() call.
struct InternalDeadlockDetector {
  struct LockDesc {
    u64 seq;        // 0 = not held; otherwise acquisition-order stamp
    uptr pc;        // caller PC of the acquisition (for reports)
    int recursion;  // recursive hold count (>1 only for MutexMulti types)
  };
  // 0 = not yet initialized; >0 = checking enabled; <0 = checking disabled
  // (empty meta table, or initialization in progress on this thread).
  int initialized;
  u64 sequence;  // monotonic counter used to stamp LockDesc::seq
  LockDesc locked[kMutexTypeMax];

  void Lock(MutexType type, uptr pc) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    // Find the last locked mutex type.
    // This is the type we will use for hierarchy checks.
    u64 max_seq = 0;
    MutexType max_idx = MutexInvalid;
    for (int i = 0; i != mutex_type_count; i++) {
      if (locked[i].seq == 0)
        continue;
      // Sequence stamps are unique per acquisition.
      CHECK_NE(locked[i].seq, max_seq);
      if (max_seq < locked[i].seq) {
        max_seq = locked[i].seq;
        max_idx = (MutexType)i;
      }
    }
    if (max_idx == type && mutex_multi[type]) {
      // Recursive lock of the same type.
      CHECK_EQ(locked[type].seq, max_seq);
      CHECK(locked[type].pc);
      locked[type].recursion++;
      return;
    }
    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
      Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
             mutex_meta[type].name, mutex_meta[max_idx].name);
      PrintMutexPC(pc);
      CHECK(0);
    }
    locked[type].seq = ++sequence;
    locked[type].pc = pc;
    locked[type].recursion = 1;
  }

  void Unlock(MutexType type) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    CHECK(locked[type].seq);  // Must actually be held.
    CHECK_GT(locked[type].recursion, 0);
    if (--locked[type].recursion)
      return;  // Still held recursively.
    locked[type].seq = 0;
    locked[type].pc = 0;
  }

  // Verifies the thread holds no checked mutexes
  // (e.g. when returning from a runtime function to user code).
  void CheckNoLocks() {
    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
  }

  // Returns true if checking is enabled for this mutex type.
  // Lazily runs DebugMutexInit() on first use. Setting initialized = -1
  // before taking mutex_meta_mtx suppresses re-entrant checking while
  // the initialization itself acquires that lock.
  bool Initialize(MutexType type) {
    if (type == MutexUnchecked || type == MutexInvalid)
      return false;
    CHECK_GT(type, MutexInvalid);
    if (initialized != 0)
      return initialized > 0;
    initialized = -1;
    SpinMutexLock lock(&mutex_meta_mtx);
    if (mutex_type_count < 0)
      DebugMutexInit();
    // Checking stays disabled if the (weak) meta table is empty.
    initialized = mutex_type_count ? 1 : -1;
    return initialized > 0;
  }
};
// Per-thread detector state. THREADLOCAL is why this whole file section is
// compiled out for Go (see the SANITIZER_CHECK_DEADLOCKS definition).
static THREADLOCAL InternalDeadlockDetector deadlock_detector;

// Out-of-line bodies for CheckedMutex; only compiled/called when
// SANITIZER_CHECK_DEADLOCKS is enabled.
void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }

void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }

void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
#endif
224+
51225
} // namespace __sanitizer

compiler-rt/lib/sanitizer_common/sanitizer_mutex.h

Lines changed: 80 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,12 +74,87 @@ class Semaphore {
7474
atomic_uint32_t state_ = {0};
7575
};
7676

77+
// Identifies a mutex's position in the static lock hierarchy.
typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as a real mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutex types must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in the MutexMeta::can_lock table.
  // NOTE: MutexLeaf shares the value -1 with MutexUnchecked; the two are
  // used in disjoint contexts (a mutex's own type vs. can_lock entries).
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
96+
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)

#if SANITIZER_CHECK_DEADLOCKS
// Static description of one mutex type; tools provide an array of these
// (the weak mutex_meta table) to define their lock hierarchy.
struct MutexMeta {
  MutexType type;
  const char *name;  // Human-readable name used in deadlock reports.
  // The table fixes what mutexes can be locked under what mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  // A MutexInvalid entry terminates the list.
  MutexType can_lock[10];
};
#endif
112+
// Mixin that feeds lock/unlock events into the per-thread deadlock
// detector. Compiles to nothing when SANITIZER_CHECK_DEADLOCKS is off
// (all methods become empty and no state is stored).
class CheckedMutex {
 public:
  constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
77151
// Reader-writer mutex.
78152
class MUTEX Mutex2 {
79153
public:
80-
constexpr Mutex2() {}
154+
constexpr Mutex2(MutexType type = MutexUnchecked) : checked_(type) {}
81155

82156
void Lock() ACQUIRE() {
157+
checked_.Lock();
83158
u64 reset_mask = ~0ull;
84159
u64 state = atomic_load_relaxed(&state_);
85160
const uptr kMaxSpinIters = 1500;
@@ -125,6 +200,7 @@ class MUTEX Mutex2 {
125200
}
126201

127202
void Unlock() RELEASE() {
203+
checked_.Unlock();
128204
bool wake_writer;
129205
u64 wake_readers;
130206
u64 new_state;
@@ -153,6 +229,7 @@ class MUTEX Mutex2 {
153229
}
154230

155231
void ReadLock() ACQUIRE_SHARED() {
232+
checked_.Lock();
156233
bool locked;
157234
u64 new_state;
158235
u64 state = atomic_load_relaxed(&state_);
@@ -173,6 +250,7 @@ class MUTEX Mutex2 {
173250
}
174251

175252
void ReadUnlock() RELEASE_SHARED() {
253+
checked_.Unlock();
176254
bool wake;
177255
u64 new_state;
178256
u64 state = atomic_load_relaxed(&state_);
@@ -207,6 +285,7 @@ class MUTEX Mutex2 {
207285
}
208286

209287
private:
288+
[[no_unique_address]] CheckedMutex checked_;
210289
atomic_uint64_t state_ = {0};
211290
Semaphore writers_;
212291
Semaphore readers_;

0 commit comments

Comments
 (0)