// option. This file may not be copied, modified, or distributed
// except according to those terms.

-use option::*;
+use either::{Left, Right};
+use option::{Option, Some, None};
use sys;
use cast::transmute;
use clone::Clone;
@@ -20,6 +21,7 @@ use super::rtio::{EventLoop, EventLoopObject, RemoteCallbackObject};
use super::context::Context;
use super::task::{Task, AnySched, Sched};
use super::message_queue::MessageQueue;
+use rt::kill::BlockedTask;
use rt::local_ptr;
use rt::local::Local;
use rt::rtio::RemoteCallback;
@@ -271,6 +273,14 @@ impl Scheduler {
        };
    }

+    /// As enqueue_task, but with the possibility for the blocked task to
+    /// already have been killed.
+    pub fn enqueue_blocked_task(&mut self, blocked_task: BlockedTask) {
+        do blocked_task.wake().map_consume |task| {
+            self.enqueue_task(task);
+        };
+    }
+
    // * Scheduler-context operations

    fn interpret_message_queue(~self) -> bool {
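The new `enqueue_blocked_task` only requeues a task that is still alive: `BlockedTask::wake()` hands the owned task back when the wakeup wins the race, and yields nothing when a kill signal already claimed it, so the `map_consume` call silently drops the dead case. A rough, self-contained model of that contract in present-day Rust (the `Task`, `BlockedTask`, and `killed` flag below are illustrative stand-ins, not the rt types):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// Illustrative stand-ins for the runtime's Task and BlockedTask types.
struct Task {
    name: &'static str,
}

struct BlockedTask {
    task: Task,
    killed: Arc<AtomicBool>, // set by a concurrent kill signal
}

impl BlockedTask {
    // Reclaim the task for scheduling, unless a kill already consumed it.
    fn wake(self) -> Option<Task> {
        if self.killed.load(Ordering::SeqCst) {
            None
        } else {
            Some(self.task)
        }
    }
}

struct Scheduler {
    run_queue: Vec<Task>,
}

impl Scheduler {
    fn enqueue_task(&mut self, task: Task) {
        self.run_queue.push(task);
    }

    // As enqueue_task, but tolerating a task that may already be dead.
    fn enqueue_blocked_task(&mut self, blocked: BlockedTask) {
        if let Some(task) = blocked.wake() {
            self.enqueue_task(task);
        } // otherwise a kill signal already disposed of the task
    }
}

fn main() {
    let mut sched = Scheduler { run_queue: Vec::new() };
    let alive = BlockedTask {
        task: Task { name: "t1" },
        killed: Arc::new(AtomicBool::new(false)),
    };
    sched.enqueue_blocked_task(alive);
    assert_eq!(sched.run_queue.len(), 1);
    println!("queued: {}", sched.run_queue[0].name);
}
```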
@@ -412,14 +422,26 @@ impl Scheduler {
    /// Called by a running task to end execution, after which it will
    /// be recycled by the scheduler for reuse in a new task.
    pub fn terminate_current_task(~self) {
-        assert!(self.in_task_context());
+        let mut this = self;
+        assert!(this.in_task_context());

        rtdebug!("ending running task");

-        do self.deschedule_running_task_and_then |sched, dead_task| {
-            let mut dead_task = dead_task;
-            let coroutine = dead_task.coroutine.take_unwrap();
-            coroutine.recycle(&mut sched.stack_pool);
+        // This task is post-cleanup, so it must be unkillable. This sequence
+        // of descheduling and recycling must not get interrupted by a kill.
+        // FIXME(#7544): Make this use an inner descheduler, like yield should.
+        this.current_task.get_mut_ref().death.unkillable += 1;
+
+        do this.deschedule_running_task_and_then |sched, dead_task| {
+            match dead_task.wake() {
+                Some(dead_task) => {
+                    let mut dead_task = dead_task;
+                    dead_task.death.unkillable -= 1; // FIXME(#7544) ugh
+                    let coroutine = dead_task.coroutine.take_unwrap();
+                    coroutine.recycle(&mut sched.stack_pool);
+                }
+                None => rtabort!("dead task killed before recycle"),
+            }
        }

        rtabort!("control reached end of task");
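Bumping `death.unkillable` before the final deschedule makes the recycle sequence atomic with respect to kills: a kill landing between descheduling and coroutine recycling would otherwise wake a task whose stack is being torn down, which is what the `rtabort!` arm guards against. The same counter discipline could be expressed as a scope guard; a hypothetical sketch, with invented types (the rt code does this by hand, as the FIXME(#7544) comments note):

```rust
// Hypothetical RAII guard over an "unkillable" depth counter: bump it before
// the final deschedule, drop it once the dead task is safely back in
// scheduler hands.
struct Death {
    unkillable: u32,
}

struct UnkillableGuard<'a> {
    death: &'a mut Death,
}

impl<'a> UnkillableGuard<'a> {
    fn enter(death: &'a mut Death) -> UnkillableGuard<'a> {
        death.unkillable += 1; // kill signals are deferred while > 0
        UnkillableGuard { death }
    }
}

impl<'a> Drop for UnkillableGuard<'a> {
    fn drop(&mut self) {
        self.death.unkillable -= 1; // kill signals may be delivered again
    }
}

fn main() {
    let mut death = Death { unkillable: 0 };
    {
        let guard = UnkillableGuard::enter(&mut death);
        assert_eq!(guard.death.unkillable, 1); // cleanup runs with kills blocked
    }
    assert_eq!(death.unkillable, 0);
}
```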
@@ -440,7 +462,7 @@ impl Scheduler {
            // here we know we are home, execute now OR we know we
            // aren't homed, and that this sched doesn't care
            do this.switch_running_tasks_and_then(task) |sched, last_task| {
-                sched.enqueue_task(last_task);
+                sched.enqueue_blocked_task(last_task);
            }
        } else if !homed && !this.run_anything {
            // the task isn't homed, but it can't be run here
@@ -491,6 +513,13 @@ impl Scheduler {
        }
    }

+    pub fn resume_blocked_task_immediately(~self, blocked_task: BlockedTask) {
+        match blocked_task.wake() {
+            Some(task) => self.resume_task_immediately(task),
+            None => Local::put(self),
+        };
+    }
+
    /// Block a running task, context switch to the scheduler, then pass the
    /// blocked task to a closure.
    ///
@@ -503,7 +532,7 @@ impl Scheduler {
    /// This passes a Scheduler pointer to the fn after the context switch
    /// in order to prevent that fn from performing further scheduling operations.
    /// Doing further scheduling could easily result in infinite recursion.
-    pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, ~Task)) {
+    pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, BlockedTask)) {
        let mut this = self;
        assert!(this.in_task_context());
@@ -512,8 +541,8 @@ impl Scheduler {
512
541
513
542
unsafe {
514
543
let blocked_task = this. current_task . take_unwrap ( ) ;
515
- let f_fake_region = transmute :: < & fn ( & mut Scheduler , ~ Task ) ,
516
- & fn ( & mut Scheduler , ~ Task ) > ( f) ;
544
+ let f_fake_region = transmute :: < & fn ( & mut Scheduler , BlockedTask ) ,
545
+ & fn ( & mut Scheduler , BlockedTask ) > ( f) ;
517
546
let f_opaque = ClosureConverter :: from_fn ( f_fake_region) ;
518
547
this. enqueue_cleanup_job ( GiveTask ( blocked_task, f_opaque) ) ;
519
548
}
@@ -539,7 +568,7 @@ impl Scheduler {
    /// You would want to think hard about doing this, e.g. if there are
    /// pending I/O events it would be a bad idea.
    pub fn switch_running_tasks_and_then(~self, next_task: ~Task,
-                                         f: &fn(&mut Scheduler, ~Task)) {
+                                         f: &fn(&mut Scheduler, BlockedTask)) {
        let mut this = self;
        assert!(this.in_task_context());
@@ -548,8 +577,8 @@ impl Scheduler {

        let old_running_task = this.current_task.take_unwrap();
        let f_fake_region = unsafe {
-            transmute::<&fn(&mut Scheduler, ~Task),
-                        &fn(&mut Scheduler, ~Task)>(f)
+            transmute::<&fn(&mut Scheduler, BlockedTask),
+                        &fn(&mut Scheduler, BlockedTask)>(f)
        };
        let f_opaque = ClosureConverter::from_fn(f_fake_region);
        this.enqueue_cleanup_job(GiveTask(old_running_task, f_opaque));
@@ -590,7 +619,15 @@ impl Scheduler {
        let cleanup_job = self.cleanup_job.take_unwrap();
        match cleanup_job {
            DoNothing => { }
-            GiveTask(task, f) => (f.to_fn())(self, task)
+            GiveTask(task, f) => {
+                let f = f.to_fn();
+                // Task might need to receive a kill signal instead of blocking.
+                // We can call the "and_then" only if it blocks successfully.
+                match BlockedTask::try_block(task) {
+                    Left(killed_task) => self.enqueue_task(killed_task),
+                    Right(blocked_task) => f(self, blocked_task),
+                }
+            }
        }
    }

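Instead of handing the raw `~Task` to the `and_then` closure, the cleanup job now lets the kill machinery decide whether the descheduled task actually gets to block: on this reading of the diff, `BlockedTask::try_block` yields `Left` with the task when a pending kill wins (the scheduler just requeues it so it can unwind) and `Right` with a blocked handle otherwise, and only the `Right` case reaches the caller's closure. A rough present-day model of that branch, with `Result` standing in for the old `Either` and invented stand-in types:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// Stand-ins for the rt types; `killed` models a pending kill signal.
struct Task {
    killed: Arc<AtomicBool>,
}

struct BlockedTask {
    task: Task,
}

impl BlockedTask {
    // Try to park the task. If a kill is already pending, hand the task
    // straight back (Err) so the scheduler can requeue it to unwind;
    // otherwise return the blocked handle (Ok).
    fn try_block(task: Task) -> Result<BlockedTask, Task> {
        if task.killed.load(Ordering::SeqCst) {
            Err(task)
        } else {
            Ok(BlockedTask { task })
        }
    }
}

struct Scheduler {
    run_queue: Vec<Task>,
}

impl Scheduler {
    fn enqueue_task(&mut self, task: Task) {
        self.run_queue.push(task);
    }

    // Mirrors the GiveTask arm: run the "and_then" only on a successful block.
    fn run_cleanup_job<F>(&mut self, task: Task, and_then: F)
    where
        F: FnOnce(&mut Scheduler, BlockedTask),
    {
        match BlockedTask::try_block(task) {
            Err(killed_task) => self.enqueue_task(killed_task),
            Ok(blocked_task) => and_then(self, blocked_task),
        }
    }
}

fn main() {
    let mut sched = Scheduler { run_queue: Vec::new() };
    let victim = Task { killed: Arc::new(AtomicBool::new(true)) };
    sched.run_cleanup_job(victim, |_sched, _blocked| unreachable!());
    assert_eq!(sched.run_queue.len(), 1); // the killed task was requeued instead
}
```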
@@ -663,12 +700,14 @@ impl SchedHandle {
// complaining
type UnsafeTaskReceiver = sys::Closure;
trait ClosureConverter {
-    fn from_fn(&fn(&mut Scheduler, ~Task)) -> Self;
-    fn to_fn(self) -> &fn(&mut Scheduler, ~Task);
+    fn from_fn(&fn(&mut Scheduler, BlockedTask)) -> Self;
+    fn to_fn(self) -> &fn(&mut Scheduler, BlockedTask);
}
impl ClosureConverter for UnsafeTaskReceiver {
-    fn from_fn(f: &fn(&mut Scheduler, ~Task)) -> UnsafeTaskReceiver { unsafe { transmute(f) } }
-    fn to_fn(self) -> &fn(&mut Scheduler, ~Task) { unsafe { transmute(self) } }
+    fn from_fn(f: &fn(&mut Scheduler, BlockedTask)) -> UnsafeTaskReceiver {
+        unsafe { transmute(f) }
+    }
+    fn to_fn(self) -> &fn(&mut Scheduler, BlockedTask) { unsafe { transmute(self) } }
}

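`ClosureConverter` exists to stash the borrowed `&fn(&mut Scheduler, BlockedTask)` closure inside the opaque `sys::Closure` carried by the cleanup job; the transmutes above erase its region so it survives the context switch. In today's Rust the same erase-then-invoke shape would more likely use a boxed trait object rather than transmute; a hypothetical sketch for comparison (names reused from the diff, everything else invented):

```rust
// Stand-ins for the rt types.
struct Scheduler {
    name: &'static str,
}
struct BlockedTask;

// The opaque, type-erased "cleanup job" payload.
type UnsafeTaskReceiver = Box<dyn FnOnce(&mut Scheduler, BlockedTask)>;

// from_fn: erase the closure's concrete type so it can be stored for later.
fn from_fn<F>(f: F) -> UnsafeTaskReceiver
where
    F: FnOnce(&mut Scheduler, BlockedTask) + 'static,
{
    Box::new(f)
}

fn main() {
    let mut sched = Scheduler { name: "sched0" };
    // Store the closure opaquely, then call it later, as the cleanup job does.
    let job = from_fn(|sched, _task| println!("cleanup ran on {}", sched.name));
    job(&mut sched, BlockedTask);
}
```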
@@ -928,8 +967,7 @@ mod test {
                };
                // Context switch directly to the new task
                do sched.switch_running_tasks_and_then(task2) |sched, task1| {
-                    let task1 = Cell::new(task1);
-                    sched.enqueue_task(task1.take());
+                    sched.enqueue_blocked_task(task1);
                }
                unsafe { *count_ptr = *count_ptr + 1; }
            };
@@ -980,9 +1018,8 @@ mod test {
                let sched = Local::take::<Scheduler>();
                assert!(sched.in_task_context());
                do sched.deschedule_running_task_and_then() |sched, task| {
-                    let task = Cell::new(task);
                    assert!(!sched.in_task_context());
-                    sched.enqueue_task(task.take());
+                    sched.enqueue_blocked_task(task);
                }
            };
            sched.enqueue_task(task);
@@ -1004,7 +1041,7 @@ mod test {
                    do sched.event_loop.callback_ms(10) {
                        rtdebug!("in callback");
                        let mut sched = Local::take::<Scheduler>();
-                        sched.enqueue_task(task.take());
+                        sched.enqueue_blocked_task(task.take());
                        Local::put(sched);
                    }
                }