Commit 3648784

Implement lockorder checking on RwLocks in debug_sync
1 parent 82bbb80 commit 3648784

1 file changed: 152 additions, 12 deletions

lightning/src/debug_sync.rs

@@ -74,12 +74,23 @@ impl MutexMetadata {
 		}
 	}
 
-	fn pre_lock(this: &Arc<MutexMetadata>) {
+	// Returns whether this was the first (non-recursive) lock (only relevant for read)
+	fn _pre_lock(this: &Arc<MutexMetadata>, read: bool) -> bool {
+		let mut inserted = false;
 		MUTEXES_HELD.with(|held| {
 			// For each mutex which is currently locked, check that no mutex's locked-before
 			// set includes the mutex we're about to lock, which would imply a lockorder
 			// inversion.
 			for locked in held.borrow().iter() {
+				if read && *locked == *this {
+					// Recursive read locks are explicitly allowed
+					return;
+				}
+			}
+			for locked in held.borrow().iter() {
+				if !read && *locked == *this {
+					panic!("Tried to lock a mutex while it was held!");
+				}
 				for locked_dep in locked.locked_before.lock().unwrap().iter() {
 					if *locked_dep == *this {
 						#[cfg(feature = "backtrace")]
@@ -92,9 +103,14 @@ impl MutexMetadata {
 				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
 			}
 			held.borrow_mut().insert(Arc::clone(this));
+			inserted = true;
 		});
+		inserted
 	}
 
+	fn pre_lock(this: &Arc<MutexMetadata>) { Self::_pre_lock(this, false); }
+	fn pre_read_lock(this: &Arc<MutexMetadata>) -> bool { Self::_pre_lock(this, true) }
+
 	fn try_locked(this: &Arc<MutexMetadata>) {
 		MUTEXES_HELD.with(|held| {
 			// Since a try-lock will simply fail if the lock is held already, we do not
@@ -171,54 +187,178 @@ impl<T> Mutex<T> {
 	}
 }
 
-pub struct RwLock<T: ?Sized> {
-	inner: StdRwLock<T>
+pub struct RwLock<T: Sized> {
+	inner: StdRwLock<T>,
+	deps: Arc<MutexMetadata>,
 }
 
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
+pub struct RwLockReadGuard<'a, T: Sized + 'a> {
+	mutex: &'a RwLock<T>,
+	first_lock: bool,
 	lock: StdRwLockReadGuard<'a, T>,
 }
 
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
+pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
+	mutex: &'a RwLock<T>,
 	lock: StdRwLockWriteGuard<'a, T>,
 }
 
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
 		&self.lock.deref()
 	}
 }
 
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
+	fn drop(&mut self) {
+		if !self.first_lock {
+			// Note that it's not strictly true that the first taken read lock will get unlocked
+			// last, but in practice our locks are always taken as RAII, so it should basically
+			// always be true.
+			return;
+		}
+		MUTEXES_HELD.with(|held| {
+			held.borrow_mut().remove(&self.mutex.deps);
+		});
+	}
+}
+
+impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
 		&self.lock.deref()
 	}
 }
 
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
+	fn drop(&mut self) {
+		MUTEXES_HELD.with(|held| {
+			held.borrow_mut().remove(&self.mutex.deps);
+		});
+	}
+}
+
+impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 	fn deref_mut(&mut self) -> &mut T {
 		self.lock.deref_mut()
 	}
 }
 
 impl<T> RwLock<T> {
 	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner) }
+		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(MutexMetadata::new()) }
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-		self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+		let first_lock = MutexMetadata::pre_read_lock(&self.deps);
+		self.inner.read().map(|lock| RwLockReadGuard { mutex: self, lock, first_lock }).map_err(|_| ())
 	}
 
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		MutexMetadata::pre_lock(&self.deps);
+		self.inner.write().map(|lock| RwLockWriteGuard { mutex: self, lock }).map_err(|_| ())
 	}
 
 	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		let res = self.inner.try_write().map(|lock| RwLockWriteGuard { mutex: self, lock }).map_err(|_| ());
+		if res.is_ok() {
+			MutexMetadata::try_locked(&self.deps);
+		}
+		res
+	}
+}
+
+#[test]
+#[should_panic]
+fn recursive_lock_fail() {
+	let mutex = Mutex::new(());
+	let _a = mutex.lock().unwrap();
+	let _b = mutex.lock().unwrap();
+}
+
+#[test]
+fn recursive_read() {
+	let lock = RwLock::new(());
+	let _a = lock.read().unwrap();
+	let _b = lock.read().unwrap();
+}
+
+#[test]
+#[should_panic]
+fn lockorder_fail() {
+	let a = Mutex::new(());
+	let b = Mutex::new(());
+	{
+		let _a = a.lock().unwrap();
+		let _b = b.lock().unwrap();
+	}
+	{
+		let _b = b.lock().unwrap();
+		let _a = a.lock().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.write().unwrap();
+	}
+	{
+		let _b = b.write().unwrap();
+		let _a = a.write().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn read_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
+#[test]
+fn read_recursive_no_lockorder() {
+	// Like the above, but note that no lockorder is implied when we recursively read-lock a
+	// RwLock, causing this to pass just fine.
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	let _outer = a.read().unwrap();
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn read_write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.write().unwrap();
 	}
 }
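
How the lockorder detection underneath this change works: each lock carries a locked_before set of locks that were held at the moment it was taken, and the thread-local MUTEXES_HELD set tracks what the current thread holds. Taking a lock panics if any currently-held lock lists the new lock in its locked_before set, since that means the two locks have at some point been taken in both orders. Below is a minimal standalone sketch of that idea, not the commit's code: lock identities are reduced to plain u64 ids and the relation is kept thread-local, whereas the real MutexMetadata shares its locked_before set across threads behind a std::sync::Mutex.

	use std::cell::RefCell;
	use std::collections::{HashMap, HashSet};

	thread_local! {
		// Lock ids currently held by this thread.
		static HELD: RefCell<HashSet<u64>> = RefCell::new(HashSet::new());
		// For each lock id, the ids of locks held at the moment it was taken.
		static LOCKED_BEFORE: RefCell<HashMap<u64, HashSet<u64>>> = RefCell::new(HashMap::new());
	}

	fn pre_lock(id: u64) {
		HELD.with(|held| LOCKED_BEFORE.with(|lb| {
			let mut lb = lb.borrow_mut();
			for held_id in held.borrow().iter() {
				// `id` was previously held while `held_id` was taken, so taking
				// `id` now, while `held_id` is held, inverts the recorded order.
				if lb.get(held_id).map_or(false, |deps| deps.contains(&id)) {
					panic!("lockorder inversion between {} and {}", id, held_id);
				}
			}
			// Record that everything currently held was taken before `id`.
			lb.entry(id).or_insert_with(HashSet::new).extend(held.borrow().iter().copied());
			held.borrow_mut().insert(id);
		}));
	}

	fn unlock(id: u64) {
		HELD.with(|held| { held.borrow_mut().remove(&id); });
	}

	fn main() {
		pre_lock(1); pre_lock(2); // records that 1 is held before 2
		unlock(2); unlock(1);
		pre_lock(2); pre_lock(1); // panics: the opposite order was recorded
	}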
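
On the first_lock flag: recursive read locks are allowed, so two read guards for the same RwLock can be alive on one thread while MUTEXES_HELD contains only one entry for the lock. Only the guard that actually inserted that entry (the first one) removes it on drop; otherwise dropping an inner recursive guard would erase the record while the outer guard still holds the lock. A hypothetical test (not part of this commit) spelling that out:

	#[test]
	fn recursive_read_guard_drop_order() {
		let lock = RwLock::new(());
		let outer = lock.read().unwrap(); // first_lock == true: inserted into MUTEXES_HELD
		{
			let _inner = lock.read().unwrap(); // first_lock == false: recursive, nothing inserted
		} // dropping `_inner` leaves MUTEXES_HELD alone; `outer` still holds the lock
		drop(outer); // only now is the lock's entry removed
	}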
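
Note also that try_write only records lockorder metadata, via try_locked, after the lock has actually been acquired: a try-lock that fails cannot deadlock, so, as the existing comment in try_locked explains for mutexes, no lockorder dependency is implied. A hypothetical test (not part of this commit) illustrating the failure path:

	#[test]
	fn failed_try_write_records_nothing() {
		let lock = RwLock::new(());
		let _read = lock.read().unwrap();
		// The underlying StdRwLock is read-locked, so try_write fails cleanly:
		// no panic, no MUTEXES_HELD entry, and no locked-before edges.
		assert!(lock.try_write().is_err());
	}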

0 commit comments
