@@ -142,9 +142,9 @@ struct sem_and_signal_release {
 }

 /// A mechanism for atomic-unlock-and-deschedule blocking and signalling.
-struct condvar { priv sem: &sem<waitqueue>; }
+struct condvar { priv sem: &sem<waitqueue>; drop { } }

-impl condvar {
+impl &condvar {
     /// Atomically drop the associated lock, and block until a signal is sent.
     fn wait() {
         // Create waiter nobe.
@@ -212,8 +212,9 @@ impl condvar {
 }

 impl &sem<waitqueue> {
-    fn access_cond<U>(blk: fn(condvar) -> U) -> U {
-        do self.access { blk(condvar { sem: self }) }
+    // The only other place that condvars get built is rwlock_write_mode.
+    fn access_cond<U>(blk: fn(c: &condvar) -> U) -> U {
+        do self.access { blk(&condvar { sem: self }) }
     }
 }

@@ -272,7 +273,7 @@ impl &mutex {
     fn lock<U>(blk: fn() -> U) -> U { (&self.sem).access(blk) }

     /// Run a function with ownership of the mutex and a handle to a condvar.
-    fn lock_cond<U>(blk: fn(condvar) -> U) -> U {
+    fn lock_cond<U>(blk: fn(c: &condvar) -> U) -> U {
         (&self.sem).access_cond(blk)
     }
 }
@@ -321,12 +322,18 @@ impl &rwlock {
             do (&self.order_lock).access {
                 let mut first_reader = false;
                 do self.state.with |state| {
-                    state.read_mode = true;
                     first_reader = (state.read_count == 0);
                     state.read_count += 1;
                 }
                 if first_reader {
                     (&self.access_lock).acquire();
+                    do self.state.with |state| {
+                        // Must happen *after* getting access_lock. If
+                        // this is set while readers are waiting, but
+                        // while a writer holds the lock, the writer will
+                        // be confused if they downgrade-then-unlock.
+                        state.read_mode = true;
+                    }
                 }
             }
             release = some(rwlock_release_read(self));
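The hunk above defers the state.read_mode = true write until the first reader actually holds access_lock. A minimal sketch of that reader-entry bookkeeping in present-day Rust (RwState, reader_enter, and acquire_access are invented names for illustration; the patch itself is 2012-era Rust):

    use std::sync::Mutex;

    struct RwState { read_mode: bool, read_count: usize }

    // Reader entry: bump the count first; only the first reader blocks on the
    // access lock, and read_mode is flipped in a second critical section,
    // strictly after the access lock is held.
    fn reader_enter(state: &Mutex<RwState>, acquire_access: impl FnOnce()) {
        let first_reader = {
            let mut s = state.lock().unwrap();
            let first = s.read_count == 0;
            s.read_count += 1;
            first
        };
        if first_reader {
            acquire_access();                        // may block behind a writer
            state.lock().unwrap().read_mode = true;  // now safe to advertise
        }
    }

    fn main() {
        let state = Mutex::new(RwState { read_mode: false, read_count: 0 });
        reader_enter(&state, || { /* acquire the access lock here */ });
        assert!(state.lock().unwrap().read_mode);
    }

Flipping read_mode inside the first critical section would let a writer that still holds access_lock, and is about to downgrade-then-unlock, observe a state that already claims read mode, which is exactly the confusion the comment warns about.
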
@@ -357,7 +364,7 @@ impl &rwlock {
      * the waiting task is signalled. (Note: a writer that waited and then
      * was signalled might reacquire the lock before other waiting writers.)
      */
-    fn write_cond<U>(blk: fn(condvar) -> U) -> U {
+    fn write_cond<U>(blk: fn(c: &condvar) -> U) -> U {
         // NB: You might think I should thread the order_lock into the cond
         // wait call, so that it gets waited on before access_lock gets
         // reacquired upon being woken up. However, (a) this would be not
@@ -374,7 +381,62 @@ impl &rwlock {
         }
     }

-    // to-do implement downgrade
+    /**
+     * As write(), but with the ability to atomically 'downgrade' the lock;
+     * i.e., to become a reader without letting other writers get the lock in
+     * the meantime (such as unlocking and then re-locking as a reader would
+     * do). The block takes a "write mode token" argument, which can be
+     * transformed into a "read mode token" by calling downgrade(). Example:
+     *
+     * do lock.write_downgrade |write_mode| {
+     *     do (&write_mode).write_cond |condvar| {
+     *         ... exclusive access ...
+     *     }
+     *     let read_mode = lock.downgrade(write_mode);
+     *     do (&read_mode).read {
+     *         ... shared access ...
+     *     }
+     * }
+     */
+    fn write_downgrade<U>(blk: fn(+rwlock_write_mode) -> U) -> U {
+        // Implementation slightly different from the slicker 'write's above.
+        // The exit path is conditional on whether the caller downgrades.
+        let mut _release = none;
+        unsafe {
+            do task::unkillable {
+                (&self.order_lock).acquire();
+                (&self.access_lock).acquire();
+                (&self.order_lock).release();
+            }
+            _release = some(rwlock_release_downgrade(self));
+        }
+        blk(rwlock_write_mode { lock: self })
+    }
+
+    fn downgrade(+token: rwlock_write_mode) -> rwlock_read_mode {
+        if !ptr::ref_eq(self, token.lock) {
+            fail ~"Can't downgrade() with a different rwlock's write_mode!";
+        }
+        unsafe {
+            do task::unkillable {
+                let mut first_reader = false;
+                do self.state.with |state| {
+                    assert !state.read_mode;
+                    state.read_mode = true;
+                    first_reader = (state.read_count == 0);
+                    state.read_count += 1;
+                }
+                if !first_reader {
+                    // Guaranteed not to let another writer in, because
+                    // another reader was holding the order_lock. Hence they
+                    // must be the one to get the access_lock (because all
+                    // access_locks are acquired with order_lock held).
+                    (&self.access_lock).release();
+                }
+            }
+        }
+        rwlock_read_mode { lock: token.lock }
+    }
 }

 // FIXME(#3136) should go inside of read()
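The doc comment above shows the write_downgrade/downgrade usage in the old do-block syntax. For comparison, a sketch of the same pattern with today's parking_lot crate (an assumed dependency, not part of this patch), whose write guard supports an atomic downgrade:

    use parking_lot::{RwLock, RwLockWriteGuard};

    fn main() {
        let lock = RwLock::new(vec![1, 2, 3]);

        // Exclusive phase.
        let mut w = lock.write();
        w.push(4);

        // Atomically become a reader: no other writer can get in between the
        // end of the write phase and the start of the read phase.
        let r = RwLockWriteGuard::downgrade(w);
        assert_eq!(r.len(), 4);
    }
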
@@ -386,8 +448,12 @@ struct rwlock_release_read {
             let mut last_reader = false;
             do self.lock.state.with |state| {
                 assert state.read_mode;
+                assert state.read_count > 0;
                 state.read_count -= 1;
-                last_reader = (state.read_count == 0);
+                if state.read_count == 0 {
+                    last_reader = true;
+                    state.read_mode = false;
+                }
             }
             if last_reader {
                 (&self.lock.access_lock).release();
@@ -396,6 +462,56 @@ struct rwlock_release_read {
     }
 }

+// FIXME(#3136) should go inside of downgrade()
+struct rwlock_release_downgrade {
+    lock: &rwlock;
+    new(lock: &rwlock) { self.lock = lock; }
+    drop unsafe {
+        do task::unkillable {
+            let mut writer_or_last_reader = false;
+            do self.lock.state.with |state| {
+                if state.read_mode {
+                    assert state.read_count > 0;
+                    state.read_count -= 1;
+                    if state.read_count == 0 {
+                        // Case 1: Writer downgraded & was the last reader
+                        writer_or_last_reader = true;
+                        state.read_mode = false;
+                    } else {
+                        // Case 2: Writer downgraded & was not the last reader
+                    }
+                } else {
+                    // Case 3: Writer did not downgrade
+                    writer_or_last_reader = true;
+                }
+            }
+            if writer_or_last_reader {
+                (&self.lock.access_lock).release();
+            }
+        }
+    }
+}
+
+/// The "write permission" token used for rwlock.write_downgrade().
+// FIXME(#3145): make lock priv somehow
+struct rwlock_write_mode { lock: &rwlock; drop { } }
+/// The "read permission" token used for rwlock.write_downgrade().
+struct rwlock_read_mode { priv lock: &rwlock; drop { } }
+
+// FIXME(#3145) XXX Region invariance forbids "mode.write(blk)"
+impl rwlock_write_mode {
+    /// Access the pre-downgrade rwlock in write mode.
+    fn write<U>(blk: fn() -> U) -> U { blk() }
+    /// Access the pre-downgrade rwlock in write mode with a condvar.
+    fn write_cond<U>(blk: fn(c: &condvar) -> U) -> U {
+        blk(&condvar { sem: &self.lock.access_lock })
+    }
+}
+impl rwlock_read_mode {
+    /// Access the post-downgrade rwlock in read mode.
+    fn read<U>(blk: fn() -> U) -> U { blk() }
+}
+
 /****************************************************************************
  * Tests
  ****************************************************************************/
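rwlock_release_downgrade's destructor boils its three cases down to one question: should this exit release access_lock? A sketch of that decision as a pure function over the shared state (RwState and writer_or_last_reader are invented names; modern Rust, for illustration only):

    struct RwState { read_mode: bool, read_count: usize }

    // Returns true when this exit should release the access lock.
    fn writer_or_last_reader(state: &mut RwState) -> bool {
        if state.read_mode {
            // The writer downgraded, so it is counted among the readers.
            assert!(state.read_count > 0);
            state.read_count -= 1;
            if state.read_count == 0 {
                // Case 1: downgraded and was the last reader out.
                state.read_mode = false;
                true
            } else {
                // Case 2: downgraded, but other readers still hold the lock.
                false
            }
        } else {
            // Case 3: never downgraded; a plain writer exit.
            true
        }
    }

    fn main() {
        let mut s = RwState { read_mode: true, read_count: 1 };
        assert!(writer_or_last_reader(&mut s));   // case 1
        let mut s = RwState { read_mode: true, read_count: 2 };
        assert!(!writer_or_last_reader(&mut s));  // case 2
        let mut s = RwState { read_mode: false, read_count: 0 };
        assert!(writer_or_last_reader(&mut s));   // case 3
    }
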
@@ -510,9 +626,11 @@ mod tests {
         let sharedstate = ~0;
         let ptr = ptr::addr_of(*sharedstate);
         do task::spawn {
-            let sharedstate = unsafe { unsafe::reinterpret_cast(ptr) };
+            let sharedstate: &mut int =
+                unsafe { unsafe::reinterpret_cast(ptr) };
             access_shared(sharedstate, m2, 10);
             c.send(());
+
         }
         access_shared(sharedstate, m, 10);
         let _ = p.recv();
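The intent of the mutex test above, two tasks each incrementing a shared counter ten times while holding the lock, can be restated against today's std as a quick orientation sketch:

    use std::sync::{Arc, Mutex, mpsc};
    use std::thread;

    fn main() {
        let shared = Arc::new(Mutex::new(0));
        let shared2 = Arc::clone(&shared);
        let (tx, rx) = mpsc::channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut n = shared2.lock().unwrap();
                let old = *n;
                thread::yield_now();   // invite interleaving, as task::yield() does
                *n = old + 1;
            }
            tx.send(()).unwrap();
        });

        for _ in 0..10 {
            let mut n = shared.lock().unwrap();
            let old = *n;
            thread::yield_now();
            *n = old + 1;
        }
        rx.recv().unwrap();
        assert_eq!(*shared.lock().unwrap(), 20);
    }
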
@@ -645,21 +763,27 @@ mod tests {
         // child task must have finished by the time try returns
         do m.lock_cond |cond| {
             let _woken = cond.signal();
-            // FIXME(#3145) - The semantics of pipes are not quite what I want
-            // here - the pipe doesn't get 'terminated' if the child was
-            // punted awake during failure.
-            // assert !woken;
+            // FIXME(#3145) this doesn't work
+            //assert !woken;
         }
     }
     /************************************************************************
      * Reader/writer lock tests
      ************************************************************************/
     #[cfg(test)]
-    fn lock_rwlock_in_mode(x: &rwlock, reader: bool, blk: fn()) {
-        if reader { x.read(blk); } else { x.write(blk); }
+    enum rwlock_mode { read, write, downgrade, downgrade_read }
+    #[cfg(test)]
+    fn lock_rwlock_in_mode(x: &rwlock, mode: rwlock_mode, blk: fn()) {
+        match mode {
+            read => x.read(blk),
+            write => x.write(blk),
+            downgrade => do x.write_downgrade |mode| { mode.write(blk); },
+            downgrade_read =>
+                do x.write_downgrade |mode| { x.downgrade(mode).read(blk); },
+        }
     }
     #[cfg(test)]
-    fn test_rwlock_exclusion(reader1: bool, reader2: bool) {
+    fn test_rwlock_exclusion(mode1: rwlock_mode, mode2: rwlock_mode) {
         // Test mutual exclusion between readers and writers. Just like the
         // mutex mutual exclusion test, a ways above.
         let (c, p) = pipes::stream();
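lock_rwlock_in_mode dispatches every rwlock test over the four locking modes. A present-day sketch of the same dispatch, again assuming parking_lot for the downgrade as in the earlier sketch (RwMode and lock_in_mode are invented names):

    use parking_lot::{RwLock, RwLockWriteGuard};

    enum RwMode { Read, Write, Downgrade, DowngradeRead }

    // Run blk while holding x in the requested mode, mirroring the test helper.
    fn lock_in_mode<T>(x: &RwLock<T>, mode: RwMode, blk: impl FnOnce()) {
        match mode {
            RwMode::Read => { let _g = x.read(); blk(); }
            RwMode::Write => { let _g = x.write(); blk(); }
            // "Downgrade" mode never actually downgrades; it runs blk in write mode.
            RwMode::Downgrade => { let _g = x.write(); blk(); }
            RwMode::DowngradeRead => {
                let g = x.write();
                let _g = RwLockWriteGuard::downgrade(g);
                blk();
            }
        }
    }

A call such as lock_in_mode(&lock, RwMode::DowngradeRead, || { /* shared access */ }) then mirrors the downgrade_read arm above.
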
@@ -668,19 +792,20 @@ mod tests {
         let sharedstate = ~0;
         let ptr = ptr::addr_of(*sharedstate);
         do task::spawn {
-            let sharedstate = unsafe { unsafe::reinterpret_cast(ptr) };
-            access_shared(sharedstate, x2, reader1, 10);
+            let sharedstate: &mut int =
+                unsafe { unsafe::reinterpret_cast(ptr) };
+            access_shared(sharedstate, x2, mode1, 10);
             c.send(());
         }
-        access_shared(sharedstate, x, reader2, 10);
+        access_shared(sharedstate, x, mode2, 10);
         let _ = p.recv();

         assert *sharedstate == 20;

-        fn access_shared(sharedstate: &mut int, x: &rwlock, reader: bool,
+        fn access_shared(sharedstate: &mut int, x: &rwlock, mode: rwlock_mode,
                          n: uint) {
             for n.times {
-                do lock_rwlock_in_mode(x, reader) {
+                do lock_rwlock_in_mode(x, mode) {
                     let oldval = *sharedstate;
                     task::yield();
                     *sharedstate = oldval + 1;
@@ -690,32 +815,59 @@ mod tests {
     }
     #[test]
     fn test_rwlock_readers_wont_modify_the_data() {
-        test_rwlock_exclusion(true, false);
-        test_rwlock_exclusion(false, true);
+        test_rwlock_exclusion(read, write);
+        test_rwlock_exclusion(write, read);
+        test_rwlock_exclusion(read, downgrade);
+        test_rwlock_exclusion(downgrade, read);
     }
     #[test]
     fn test_rwlock_writers_and_writers() {
-        test_rwlock_exclusion(false, false);
+        test_rwlock_exclusion(write, write);
+        test_rwlock_exclusion(write, downgrade);
+        test_rwlock_exclusion(downgrade, write);
+        test_rwlock_exclusion(downgrade, downgrade);
     }
-    #[test]
-    fn test_rwlock_readers_and_readers() {
+    #[cfg(test)]
+    fn test_rwlock_handshake(mode1: rwlock_mode, mode2: rwlock_mode,
+                             make_mode2_go_first: bool) {
         // Much like sem_multi_resource.
         let x = ~rwlock();
         let x2 = ~x.clone();
         let (c1, p1) = pipes::stream();
         let (c2, p2) = pipes::stream();
         do task::spawn {
-            do x2.read {
+            if !make_mode2_go_first {
+                let _ = p2.recv(); // parent sends to us once it locks, or ...
+            }
+            do lock_rwlock_in_mode(x2, mode2) {
+                if make_mode2_go_first {
+                    c1.send(()); // ... we send to it once we lock
+                }
                 let _ = p2.recv();
                 c1.send(());
             }
         }
-        do x.read {
+        if make_mode2_go_first {
+            let _ = p1.recv(); // child sends to us once it locks, or ...
+        }
+        do lock_rwlock_in_mode(x, mode1) {
+            if !make_mode2_go_first {
+                c2.send(()); // ... we send to it once we lock
+            }
             c2.send(());
             let _ = p1.recv();
         }
     }
     #[test]
+    fn test_rwlock_readers_and_readers() {
+        test_rwlock_handshake(read, read, false);
+        // The downgrader needs to get in before the reader gets in, otherwise
+        // they cannot end up reading at the same time.
+        test_rwlock_handshake(downgrade_read, read, false);
+        test_rwlock_handshake(read, downgrade_read, true);
+        // Two downgrade_reads can never both end up reading at the same time.
+    }
+    #[test]
     fn test_rwlock_cond_wait() {
         // As test_mutex_cond_wait above.
         let x = ~rwlock();
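test_rwlock_handshake proves that two tasks hold the lock at the same time by having each send to the other while locked. The core of that handshake, written against today's std and stripped of the mode machinery, looks like:

    use std::sync::{Arc, RwLock, mpsc};
    use std::thread;

    fn main() {
        let lock = Arc::new(RwLock::new(()));
        let lock2 = Arc::clone(&lock);
        let (c1, p1) = mpsc::channel();
        let (c2, p2) = mpsc::channel();

        let child = thread::spawn(move || {
            let _guard = lock2.read().unwrap();
            c1.send(()).unwrap();   // child: "I'm holding the read lock"
            p2.recv().unwrap();     // wait until the parent holds it too
        });

        let _guard = lock.read().unwrap();
        c2.send(()).unwrap();       // parent: "I'm holding the read lock"
        p1.recv().unwrap();         // both sides were locked at once
        drop(_guard);
        child.join().unwrap();
    }
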
@@ -751,26 +903,40 @@ mod tests {
         do x.read { } // Just for good measure
     }
     #[cfg(test)] #[ignore(cfg(windows))]
-    fn rwlock_kill_helper(reader1: bool, reader2: bool) {
+    fn rwlock_kill_helper(mode1: rwlock_mode, mode2: rwlock_mode) {
         // Mutex must get automatically unlocked if failed/killed within.
         let x = ~rwlock();
         let x2 = ~x.clone();

         let result: result::result<(), ()> = do task::try {
-            do lock_rwlock_in_mode(x2, reader1) {
+            do lock_rwlock_in_mode(x2, mode1) {
                 fail;
             }
         };
         assert result.is_err();
         // child task must have finished by the time try returns
-        do lock_rwlock_in_mode(x, reader2) { }
+        do lock_rwlock_in_mode(x, mode2) { }
     }
     #[test] #[ignore(cfg(windows))]
-    fn test_rwlock_reader_killed_writer() { rwlock_kill_helper(true, false); }
+    fn test_rwlock_reader_killed_writer() { rwlock_kill_helper(read, write); }
     #[test] #[ignore(cfg(windows))]
-    fn test_rwlock_writer_killed_reader() { rwlock_kill_helper(false, true); }
+    fn test_rwlock_writer_killed_reader() { rwlock_kill_helper(write, read); }
     #[test] #[ignore(cfg(windows))]
-    fn test_rwlock_reader_killed_reader() { rwlock_kill_helper(true, true); }
+    fn test_rwlock_reader_killed_reader() { rwlock_kill_helper(read, read); }
     #[test] #[ignore(cfg(windows))]
-    fn test_rwlock_writer_killed_writer() { rwlock_kill_helper(false, false); }
+    fn test_rwlock_writer_killed_writer() { rwlock_kill_helper(write, write); }
+    #[test] #[should_fail] #[ignore(cfg(windows))]
+    fn test_rwlock_downgrade_cant_swap() {
+        // Tests that you can't downgrade with a different rwlock's token.
+        let x = ~rwlock();
+        let y = ~rwlock();
+        do x.write_downgrade |xwrite| {
+            let mut xopt = some(xwrite);
+            do y.write_downgrade |_ywrite| {
+                do y.downgrade(option::swap_unwrap(&mut xopt)).read {
+                    error!("oops, y.downgrade(x) should have failed!");
+                }
+            }
+        }
+    }
 }