@@ -281,7 +281,6 @@ struct RWARCInner<T> { lock: RWlock, failed: bool, data: T }
 #[mutable]
 struct RWARC<T> {
     x: UnsafeAtomicRcBox<RWARCInner<T>>,
-    cant_nest: ()
 }
 
 /// Create a reader/writer ARC with the supplied data.
@@ -299,15 +298,14 @@ pub fn rw_arc_with_condvars<T:Const + Owned>(
     let data =
         RWARCInner { lock: rwlock_with_condvars(num_condvars),
                      failed: false, data: user_data };
-    RWARC { x: UnsafeAtomicRcBox::new(data), cant_nest: () }
+    RWARC { x: UnsafeAtomicRcBox::new(data), }
 }
 
 impl<T:Const + Owned> RWARC<T> {
     /// Duplicate a rwlock-protected ARC, as arc::clone.
     pub fn clone(&self) -> RWARC<T> {
         RWARC {
             x: self.x.clone(),
-            cant_nest: (),
         }
     }
 
@@ -382,12 +380,12 @@ impl<T:Const + Owned> RWARC<T> {
      * # Example
      *
      * ~~~ {.rust}
-     * do arc.write_downgrade |write_mode| {
-     *     do (&write_mode).write_cond |state, condvar| {
+     * do arc.write_downgrade |mut write_token| {
+     *     do write_token.write_cond |state, condvar| {
      *         ... exclusive access with mutable state ...
      *     }
-     *     let read_mode = arc.downgrade(write_mode);
-     *     do (&read_mode).read |state| {
+     *     let read_token = arc.downgrade(write_token);
+     *     do read_token.read |state| {
      *         ... shared access with immutable state ...
      *     }
      * }
@@ -815,4 +813,66 @@ mod tests {
 
         wp2.recv(); // complete handshake with writer
     }
+    #[cfg(test)]
+    fn test_rw_write_cond_downgrade_read_race_helper() {
+        // Tests that when a downgrader hands off the "reader cloud" lock
+        // because of a contending reader, a writer can't race to get it
+        // instead, which would result in readers_and_writers. This tests
+        // the sync module rather than this one, but it's here because an
+        // rwarc gives us extra shared state to help check for the race.
+        // If you want to see this test fail, go to sync.rs and replace the
+        // line in RWlock::write_cond() that looks like:
+        //     "blk(&Condvar { order: opt_lock, ..*cond })"
+        // with just "blk(cond)".
+        let x = ~RWARC(true);
+        let (wp, wc) = comm::stream();
+
+        // writer task
+        let xw = (*x).clone();
+        do task::spawn {
+            do xw.write_cond |state, c| {
+                wc.send(()); // tell downgrader it's ok to go
+                c.wait();
+                // The core of the test is here: the condvar reacquire path
+                // must involve order_lock, so that it cannot race with a reader
+                // trying to receive the "reader cloud lock hand-off".
+                *state = false;
+            }
+        }
+
+        wp.recv(); // wait for writer to get in
+
+        do x.write_downgrade |mut write_mode| {
+            do write_mode.write_cond |state, c| {
+                assert!(*state);
+                // make writer contend in the cond-reacquire path
+                c.signal();
+            }
+            // make a reader task to trigger the "reader cloud lock" handoff
+            let xr = (*x).clone();
+            let (rp, rc) = comm::stream();
+            do task::spawn {
+                rc.send(());
+                do xr.read |_state| { }
+            }
+            rp.recv(); // wait for reader task to exist
+
+            let read_mode = x.downgrade(write_mode);
+            do read_mode.read |state| {
+                // if writer mistakenly got in, make sure it mutates state
+                // before we assert on it
+                for 5.times { task::yield(); }
+                // make sure writer didn't get in.
+                assert!(*state);
+            }
+        }
+    }
+    #[test]
+    fn test_rw_write_cond_downgrade_read_race() {
+        // Ideally the above test case would have yield statements in it that
+        // helped to expose the race nearly 100% of the time... but adding
+        // yields in the intuitively-right locations made it even less likely,
+        // and I wasn't sure why :( . This is a mediocre "next best" option.
+        for 8.times { test_rw_write_cond_downgrade_read_race_helper() }
+    }
 }
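For reference, the downgrade flow that the renamed doc example and the new test exercise looks roughly like this under the post-change API. This is a sketch assembled from the code above (the token names, `RWARC(true)`, `write_cond`, `downgrade`, and `read`), not a verbatim excerpt from the library.

~~~ {.rust}
// Sketch only: grounded in the doc example and the new test above.
let arc = ~RWARC(true);
do arc.write_downgrade |mut write_token| {
    // Exclusive access first; write_cond also exposes the condvar.
    do write_token.write_cond |state, condvar| {
        *state = true;
        condvar.signal();
    }
    // Atomically trade write access for read access: no other writer
    // can get in between the write and the read.
    let read_token = arc.downgrade(write_token);
    do read_token.read |state| {
        assert!(*state); // still sees the value written above
    }
}
~~~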