Skip to content

Commit 82bbb80

Browse files
committed
Refactor debug sync methods into helper functions
1 parent 0e0aabe commit 82bbb80

File tree

1 file changed (+47 additions, −36 deletions)

lightning/src/debug_sync.rs

Lines changed: 47 additions & 36 deletions
(table columns: original file line number | diff line number | diff line change)
@@ -64,6 +64,50 @@ impl std::hash::Hash for MutexMetadata {
6464
fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
6565
}
6666

67+
impl MutexMetadata {
68+
fn new() -> MutexMetadata {
69+
MutexMetadata {
70+
locked_before: StdMutex::new(HashSet::new()),
71+
mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
72+
#[cfg(feature = "backtrace")]
73+
mutex_construction_bt: Backtrace::new(),
74+
}
75+
}
76+
77+
fn pre_lock(this: &Arc<MutexMetadata>) {
78+
MUTEXES_HELD.with(|held| {
79+
// For each mutex which is currently locked, check that no mutex's locked-before
80+
// set includes the mutex we're about to lock, which would imply a lockorder
81+
// inversion.
82+
for locked in held.borrow().iter() {
83+
for locked_dep in locked.locked_before.lock().unwrap().iter() {
84+
if *locked_dep == *this {
85+
#[cfg(feature = "backtrace")]
86+
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
87+
#[cfg(not(feature = "backtrace"))]
88+
panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
89+
}
90+
}
91+
// Insert any already-held mutexes in our locked-before set.
92+
this.locked_before.lock().unwrap().insert(Arc::clone(locked));
93+
}
94+
held.borrow_mut().insert(Arc::clone(this));
95+
});
96+
}
97+
98+
fn try_locked(this: &Arc<MutexMetadata>) {
99+
MUTEXES_HELD.with(|held| {
100+
// Since a try-lock will simply fail if the lock is held already, we do not
101+
// consider try-locks to ever generate lockorder inversions. However, if a try-lock
102+
// succeeds, we do consider it to have created lockorder dependencies.
103+
for locked in held.borrow().iter() {
104+
this.locked_before.lock().unwrap().insert(Arc::clone(locked));
105+
}
106+
held.borrow_mut().insert(Arc::clone(this));
107+
});
108+
}
109+
}
110+
67111
pub struct Mutex<T: Sized> {
68112
inner: StdMutex<T>,
69113
deps: Arc<MutexMetadata>,
@@ -110,51 +154,18 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
110154

111155
impl<T> Mutex<T> {
112156
pub fn new(inner: T) -> Mutex<T> {
113-
Mutex {
114-
inner: StdMutex::new(inner),
115-
deps: Arc::new(MutexMetadata {
116-
locked_before: StdMutex::new(HashSet::new()),
117-
mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
118-
#[cfg(feature = "backtrace")]
119-
mutex_construction_bt: Backtrace::new(),
120-
}),
121-
}
157+
Mutex { inner: StdMutex::new(inner), deps: Arc::new(MutexMetadata::new()) }
122158
}
123159

124160
pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
125-
MUTEXES_HELD.with(|held| {
126-
// For each mutex which is currently locked, check that no mutex's locked-before
127-
// set includes the mutex we're about to lock, which would imply a lockorder
128-
// inversion.
129-
for locked in held.borrow().iter() {
130-
for locked_dep in locked.locked_before.lock().unwrap().iter() {
131-
if *locked_dep == self.deps {
132-
#[cfg(feature = "backtrace")]
133-
panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
134-
#[cfg(not(feature = "backtrace"))]
135-
panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
136-
}
137-
}
138-
// Insert any already-held mutexes in our locked-before set.
139-
self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
140-
}
141-
held.borrow_mut().insert(Arc::clone(&self.deps));
142-
});
161+
MutexMetadata::pre_lock(&self.deps);
143162
self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
144163
}
145164

146165
pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
147166
let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
148167
if res.is_ok() {
149-
MUTEXES_HELD.with(|held| {
150-
// Since a try-lock will simply fail if the lock is held already, we do not
151-
// consider try-locks to ever generate lockorder inversions. However, if a try-lock
152-
// succeeds, we do consider it to have created lockorder dependencies.
153-
for locked in held.borrow().iter() {
154-
self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
155-
}
156-
held.borrow_mut().insert(Arc::clone(&self.deps));
157-
});
168+
MutexMetadata::try_locked(&self.deps);
158169
}
159170
res
160171
}

0 commit comments

Comments
 (0)