Skip to content

Commit f5e1074

Browse files
authored
Merge pull request rust-lang#4383 from LorrensP-2158466/remove-leaky-syncobj
use RwLockRef instead of RwLockId
2 parents e2ce96b + 479524f commit f5e1074

File tree

5 files changed

+153
-129
lines changed

5 files changed

+153
-129
lines changed

src/tools/miri/src/concurrency/sync.rs

Lines changed: 105 additions & 85 deletions
Original file line number | Diff line number | Diff line change
@@ -67,6 +67,11 @@ impl MutexRef {
6767
fn new() -> Self {
6868
MutexRef(Rc::new(RefCell::new(Mutex::default())))
6969
}
70+
71+
/// Get the id of the thread that currently owns this lock, or `None` if it is not locked.
72+
pub fn owner(&self) -> Option<ThreadId> {
73+
self.0.borrow().owner
74+
}
7075
}
7176

7277
impl VisitProvenance for MutexRef {
@@ -75,8 +80,6 @@ impl VisitProvenance for MutexRef {
7580
}
7681
}
7782

78-
declare_id!(RwLockId);
79-
8083
/// The read-write lock state.
8184
#[derive(Default, Debug)]
8285
struct RwLock {
@@ -111,6 +114,49 @@ struct RwLock {
111114
clock_current_readers: VClock,
112115
}
113116

117+
impl RwLock {
118+
#[inline]
119+
/// Check if locked.
120+
fn is_locked(&self) -> bool {
121+
trace!(
122+
"rwlock_is_locked: writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
123+
self.writer,
124+
self.readers.len(),
125+
);
126+
self.writer.is_some() || self.readers.is_empty().not()
127+
}
128+
129+
/// Check if write locked.
130+
#[inline]
131+
fn is_write_locked(&self) -> bool {
132+
trace!("rwlock_is_write_locked: writer is {:?}", self.writer);
133+
self.writer.is_some()
134+
}
135+
}
136+
137+
#[derive(Default, Clone, Debug)]
138+
pub struct RwLockRef(Rc<RefCell<RwLock>>);
139+
140+
impl RwLockRef {
141+
fn new() -> Self {
142+
RwLockRef(Rc::new(RefCell::new(RwLock::default())))
143+
}
144+
145+
pub fn is_locked(&self) -> bool {
146+
self.0.borrow().is_locked()
147+
}
148+
149+
pub fn is_write_locked(&self) -> bool {
150+
self.0.borrow().is_write_locked()
151+
}
152+
}
153+
154+
impl VisitProvenance for RwLockRef {
155+
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
156+
// RwLockRef contains no provenance.
157+
}
158+
}
159+
114160
declare_id!(CondvarId);
115161

116162
/// The conditional variable state.
@@ -164,7 +210,6 @@ struct FutexWaiter {
164210
/// The state of all synchronization objects.
165211
#[derive(Default, Debug)]
166212
pub struct SynchronizationObjects {
167-
rwlocks: IndexVec<RwLockId, RwLock>,
168213
condvars: IndexVec<CondvarId, Condvar>,
169214
pub(super) init_onces: IndexVec<InitOnceId, InitOnce>,
170215
}
@@ -174,17 +219,17 @@ impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
174219
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
175220
fn condvar_reacquire_mutex(
176221
&mut self,
177-
mutex_ref: &MutexRef,
222+
mutex_ref: MutexRef,
178223
retval: Scalar,
179224
dest: MPlaceTy<'tcx>,
180225
) -> InterpResult<'tcx> {
181226
let this = self.eval_context_mut();
182-
if this.mutex_is_locked(mutex_ref) {
183-
assert_ne!(this.mutex_get_owner(mutex_ref), this.active_thread());
227+
if let Some(owner) = mutex_ref.owner() {
228+
assert_ne!(owner, this.active_thread());
184229
this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
185230
} else {
186231
// We can have it right now!
187-
this.mutex_lock(mutex_ref);
232+
this.mutex_lock(&mutex_ref);
188233
// Don't forget to write the return value.
189234
this.write_scalar(retval, &dest)?;
190235
}
@@ -196,8 +241,8 @@ impl SynchronizationObjects {
196241
pub fn mutex_create(&mut self) -> MutexRef {
197242
MutexRef::new()
198243
}
199-
pub fn rwlock_create(&mut self) -> RwLockId {
200-
self.rwlocks.push(Default::default())
244+
pub fn rwlock_create(&mut self) -> RwLockRef {
245+
RwLockRef::new()
201246
}
202247

203248
pub fn condvar_create(&mut self) -> CondvarId {
@@ -334,18 +379,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
334379
Some(alloc_extra.get_sync::<T>(offset).unwrap())
335380
}
336381

337-
#[inline]
338-
/// Get the id of the thread that currently owns this lock.
339-
fn mutex_get_owner(&self, mutex_ref: &MutexRef) -> ThreadId {
340-
mutex_ref.0.borrow().owner.unwrap()
341-
}
342-
343-
#[inline]
344-
/// Check if locked.
345-
fn mutex_is_locked(&self, mutex_ref: &MutexRef) -> bool {
346-
mutex_ref.0.borrow().owner.is_some()
347-
}
348-
349382
/// Lock by setting the mutex owner and increasing the lock count.
350383
fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
351384
let this = self.eval_context_mut();
@@ -413,14 +446,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
413446
#[inline]
414447
fn mutex_enqueue_and_block(
415448
&mut self,
416-
mutex_ref: &MutexRef,
449+
mutex_ref: MutexRef,
417450
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
418451
) {
419452
let this = self.eval_context_mut();
420-
assert!(this.mutex_is_locked(mutex_ref), "queuing on unlocked mutex");
421453
let thread = this.active_thread();
422-
mutex_ref.0.borrow_mut().queue.push_back(thread);
423-
let mutex_ref = mutex_ref.clone();
454+
let mut mutex = mutex_ref.0.borrow_mut();
455+
mutex.queue.push_back(thread);
456+
assert!(mutex.owner.is_some(), "queuing on unlocked mutex");
457+
drop(mutex);
424458
this.block_thread(
425459
BlockReason::Mutex,
426460
None,
@@ -432,7 +466,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
432466
|this, unblock: UnblockKind| {
433467
assert_eq!(unblock, UnblockKind::Ready);
434468

435-
assert!(!this.mutex_is_locked(&mutex_ref));
469+
assert!(mutex_ref.owner().is_none());
436470
this.mutex_lock(&mutex_ref);
437471

438472
if let Some((retval, dest)) = retval_dest {
@@ -445,37 +479,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
445479
);
446480
}
447481

448-
#[inline]
449-
/// Check if locked.
450-
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
451-
let this = self.eval_context_ref();
452-
let rwlock = &this.machine.sync.rwlocks[id];
453-
trace!(
454-
"rwlock_is_locked: {:?} writer is {:?} and there are {} reader threads (some of which could hold multiple read locks)",
455-
id,
456-
rwlock.writer,
457-
rwlock.readers.len(),
458-
);
459-
rwlock.writer.is_some() || rwlock.readers.is_empty().not()
460-
}
461-
462-
/// Check if write locked.
463-
#[inline]
464-
fn rwlock_is_write_locked(&self, id: RwLockId) -> bool {
465-
let this = self.eval_context_ref();
466-
let rwlock = &this.machine.sync.rwlocks[id];
467-
trace!("rwlock_is_write_locked: {:?} writer is {:?}", id, rwlock.writer);
468-
rwlock.writer.is_some()
469-
}
470-
471482
/// Read-lock the lock by adding the `reader` the list of threads that own
472483
/// this lock.
473-
fn rwlock_reader_lock(&mut self, id: RwLockId) {
484+
fn rwlock_reader_lock(&mut self, rwlock_ref: &RwLockRef) {
474485
let this = self.eval_context_mut();
475486
let thread = this.active_thread();
476-
assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
477-
trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, thread);
478-
let rwlock = &mut this.machine.sync.rwlocks[id];
487+
trace!("rwlock_reader_lock: now also held (one more time) by {:?}", thread);
488+
let mut rwlock = rwlock_ref.0.borrow_mut();
489+
assert!(!rwlock.is_write_locked(), "the lock is write locked");
479490
let count = rwlock.readers.entry(thread).or_insert(0);
480491
*count = count.strict_add(1);
481492
if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
@@ -485,20 +496,20 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
485496

486497
/// Try read-unlock the lock for the current threads and potentially give the lock to a new owner.
487498
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
488-
fn rwlock_reader_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool> {
499+
fn rwlock_reader_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
489500
let this = self.eval_context_mut();
490501
let thread = this.active_thread();
491-
let rwlock = &mut this.machine.sync.rwlocks[id];
502+
let mut rwlock = rwlock_ref.0.borrow_mut();
492503
match rwlock.readers.entry(thread) {
493504
Entry::Occupied(mut entry) => {
494505
let count = entry.get_mut();
495506
assert!(*count > 0, "rwlock locked with count == 0");
496507
*count -= 1;
497508
if *count == 0 {
498-
trace!("rwlock_reader_unlock: {:?} no longer held by {:?}", id, thread);
509+
trace!("rwlock_reader_unlock: no longer held by {:?}", thread);
499510
entry.remove();
500511
} else {
501-
trace!("rwlock_reader_unlock: {:?} held one less time by {:?}", id, thread);
512+
trace!("rwlock_reader_unlock: held one less time by {:?}", thread);
502513
}
503514
}
504515
Entry::Vacant(_) => return interp_ok(false), // we did not even own this lock
@@ -511,15 +522,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
511522
}
512523

513524
// The thread was a reader. If the lock is not held any more, give it to a writer.
514-
if this.rwlock_is_locked(id).not() {
525+
if rwlock.is_locked().not() {
515526
// All the readers are finished, so set the writer data-race handle to the value
516527
// of the union of all reader data race handles, since the set of readers
517528
// happen-before the writers
518-
let rwlock = &mut this.machine.sync.rwlocks[id];
519-
rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
529+
let rwlock_ref = &mut *rwlock;
530+
rwlock_ref.clock_unlocked.clone_from(&rwlock_ref.clock_current_readers);
520531
// See if there is a thread to unblock.
521-
if let Some(writer) = rwlock.writer_queue.pop_front() {
522-
this.unblock_thread(writer, BlockReason::RwLock(id))?;
532+
if let Some(writer) = rwlock_ref.writer_queue.pop_front() {
533+
drop(rwlock); // make RefCell available for unblock callback
534+
this.unblock_thread(writer, BlockReason::RwLock)?;
523535
}
524536
}
525537
interp_ok(true)
@@ -530,26 +542,28 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
530542
#[inline]
531543
fn rwlock_enqueue_and_block_reader(
532544
&mut self,
533-
id: RwLockId,
545+
rwlock_ref: RwLockRef,
534546
retval: Scalar,
535547
dest: MPlaceTy<'tcx>,
536548
) {
537549
let this = self.eval_context_mut();
538550
let thread = this.active_thread();
539-
assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
540-
this.machine.sync.rwlocks[id].reader_queue.push_back(thread);
551+
let mut rwlock = rwlock_ref.0.borrow_mut();
552+
rwlock.reader_queue.push_back(thread);
553+
assert!(rwlock.is_write_locked(), "read-queueing on not write locked rwlock");
554+
drop(rwlock);
541555
this.block_thread(
542-
BlockReason::RwLock(id),
556+
BlockReason::RwLock,
543557
None,
544558
callback!(
545559
@capture<'tcx> {
546-
id: RwLockId,
560+
rwlock_ref: RwLockRef,
547561
retval: Scalar,
548562
dest: MPlaceTy<'tcx>,
549563
}
550564
|this, unblock: UnblockKind| {
551565
assert_eq!(unblock, UnblockKind::Ready);
552-
this.rwlock_reader_lock(id);
566+
this.rwlock_reader_lock(&rwlock_ref);
553567
this.write_scalar(retval, &dest)?;
554568
interp_ok(())
555569
}
@@ -559,12 +573,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
559573

560574
/// Lock by setting the writer that owns the lock.
561575
#[inline]
562-
fn rwlock_writer_lock(&mut self, id: RwLockId) {
576+
fn rwlock_writer_lock(&mut self, rwlock_ref: &RwLockRef) {
563577
let this = self.eval_context_mut();
564578
let thread = this.active_thread();
565-
assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
566-
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, thread);
567-
let rwlock = &mut this.machine.sync.rwlocks[id];
579+
trace!("rwlock_writer_lock: now held by {:?}", thread);
580+
581+
let mut rwlock = rwlock_ref.0.borrow_mut();
582+
assert!(!rwlock.is_locked(), "the rwlock is already locked");
568583
rwlock.writer = Some(thread);
569584
if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
570585
data_race.acquire_clock(&rwlock.clock_unlocked, &this.machine.threads);
@@ -574,35 +589,38 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
574589
/// Try to unlock an rwlock held by the current thread.
575590
/// Return `false` if it is held by another thread.
576591
#[inline]
577-
fn rwlock_writer_unlock(&mut self, id: RwLockId) -> InterpResult<'tcx, bool> {
592+
fn rwlock_writer_unlock(&mut self, rwlock_ref: &RwLockRef) -> InterpResult<'tcx, bool> {
578593
let this = self.eval_context_mut();
579594
let thread = this.active_thread();
580-
let rwlock = &mut this.machine.sync.rwlocks[id];
595+
let mut rwlock = rwlock_ref.0.borrow_mut();
581596
interp_ok(if let Some(current_writer) = rwlock.writer {
582597
if current_writer != thread {
583598
// Only the owner can unlock the rwlock.
584599
return interp_ok(false);
585600
}
586601
rwlock.writer = None;
587-
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
602+
trace!("rwlock_writer_unlock: unlocked by {:?}", thread);
588603
// Record release clock for next lock holder.
589604
if let Some(data_race) = this.machine.data_race.as_vclocks_ref() {
590605
data_race.release_clock(&this.machine.threads, |clock| {
591606
rwlock.clock_unlocked.clone_from(clock)
592607
});
593608
}
609+
594610
// The thread was a writer.
595611
//
596612
// We are prioritizing writers here against the readers. As a
597613
// result, not only readers can starve writers, but also writers can
598614
// starve readers.
599615
if let Some(writer) = rwlock.writer_queue.pop_front() {
600-
this.unblock_thread(writer, BlockReason::RwLock(id))?;
616+
drop(rwlock); // make RefCell available for unblock callback
617+
this.unblock_thread(writer, BlockReason::RwLock)?;
601618
} else {
602619
// Take the entire read queue and wake them all up.
603620
let readers = std::mem::take(&mut rwlock.reader_queue);
621+
drop(rwlock); // make RefCell available for unblock callback
604622
for reader in readers {
605-
this.unblock_thread(reader, BlockReason::RwLock(id))?;
623+
this.unblock_thread(reader, BlockReason::RwLock)?;
606624
}
607625
}
608626
true
@@ -616,26 +634,28 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
616634
#[inline]
617635
fn rwlock_enqueue_and_block_writer(
618636
&mut self,
619-
id: RwLockId,
637+
rwlock_ref: RwLockRef,
620638
retval: Scalar,
621639
dest: MPlaceTy<'tcx>,
622640
) {
623641
let this = self.eval_context_mut();
624-
assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
625642
let thread = this.active_thread();
626-
this.machine.sync.rwlocks[id].writer_queue.push_back(thread);
643+
let mut rwlock = rwlock_ref.0.borrow_mut();
644+
rwlock.writer_queue.push_back(thread);
645+
assert!(rwlock.is_locked(), "write-queueing on unlocked rwlock");
646+
drop(rwlock);
627647
this.block_thread(
628-
BlockReason::RwLock(id),
648+
BlockReason::RwLock,
629649
None,
630650
callback!(
631651
@capture<'tcx> {
632-
id: RwLockId,
652+
rwlock_ref: RwLockRef,
633653
retval: Scalar,
634654
dest: MPlaceTy<'tcx>,
635655
}
636656
|this, unblock: UnblockKind| {
637657
assert_eq!(unblock, UnblockKind::Ready);
638-
this.rwlock_writer_lock(id);
658+
this.rwlock_writer_lock(&rwlock_ref);
639659
this.write_scalar(retval, &dest)?;
640660
interp_ok(())
641661
}
@@ -700,15 +720,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
700720
}
701721
// Try to acquire the mutex.
702722
// The timeout only applies to the first wait (until the signal), not for mutex acquisition.
703-
this.condvar_reacquire_mutex(&mutex_ref, retval_succ, dest)
723+
this.condvar_reacquire_mutex(mutex_ref, retval_succ, dest)
704724
}
705725
UnblockKind::TimedOut => {
706726
// We have to remove the waiter from the queue again.
707727
let thread = this.active_thread();
708728
let waiters = &mut this.machine.sync.condvars[condvar].waiters;
709729
waiters.retain(|waiter| *waiter != thread);
710730
// Now get back the lock.
711-
this.condvar_reacquire_mutex(&mutex_ref, retval_timeout, dest)
731+
this.condvar_reacquire_mutex(mutex_ref, retval_timeout, dest)
712732
}
713733
}
714734
}

0 commit comments

Comments (0)