@@ -16,11 +16,11 @@ use super::work_queue::WorkQueue;
 use super::stack::{StackPool, StackSegment};
 use super::rtio::{EventLoop, EventLoopObject};
 use super::context::Context;
+use cell::Cell;
 
 #[cfg(test)] use super::uvio::UvEventLoop;
 #[cfg(test)] use unstable::run_in_bare_thread;
 #[cfg(test)] use int;
-#[cfg(test)] use cell::Cell;
 
 // A more convenient name for external callers, e.g. `local_sched::take()`
 pub mod local_sched;
@@ -58,8 +58,6 @@ impl ClosureConverter for UnsafeTaskReceiver {
 
 enum CleanupJob {
     DoNothing,
-    RescheduleTask(~Task),
-    RecycleTask(~Task),
     GiveTask(~Task, UnsafeTaskReceiver)
 }
 
@@ -143,44 +141,25 @@ pub impl Scheduler {
 
         rtdebug!("ending running task");
 
-        let dead_task = self.current_task.swap_unwrap();
-        self.enqueue_cleanup_job(RecycleTask(dead_task));
-
-        local_sched::put(self);
-
-        let sched = unsafe { local_sched::unsafe_borrow() };
-        let (sched_context, last_task_context, _) = sched.get_contexts();
-        let last_task_context = last_task_context.unwrap();
-        Context::swap(last_task_context, sched_context);
+        do self.deschedule_running_task_and_then |dead_task| {
+            let dead_task = Cell(dead_task);
+            do local_sched::borrow |sched| {
+                dead_task.take().recycle(&mut sched.stack_pool);
+            }
+        }
 
         // Control never reaches here
     }
 
-    /// Switch directly to another task, without going through the scheduler.
-    /// You would want to think hard about doing this, e.g. if there are
-    /// pending I/O events it would be a bad idea.
-    fn resume_task_from_running_task_direct(~self, next_task: ~Task) {
+    fn schedule_new_task(~self, task: ~Task) {
         let mut self = self;
         assert!(self.in_task_context());
 
-        rtdebug!("switching tasks");
-
-        let old_running_task = self.current_task.swap_unwrap();
-        self.enqueue_cleanup_job(RescheduleTask(old_running_task));
-        self.current_task = Some(next_task);
-
-        local_sched::put(self);
-
-        unsafe {
-            let sched = local_sched::unsafe_borrow();
-            let (_, last_task_context, next_task_context) = sched.get_contexts();
-            let last_task_context = last_task_context.unwrap();
-            let next_task_context = next_task_context.unwrap();
-            Context::swap(last_task_context, next_task_context);
-
-            // We could be executing in a different thread now
-            let sched = local_sched::unsafe_borrow();
-            sched.run_cleanup_job();
+        do self.switch_running_tasks_and_then(task) |last_task| {
+            let last_task = Cell(last_task);
+            do local_sched::borrow |sched| {
+                sched.task_queue.push_front(last_task.take());
+            }
         }
     }
@@ -294,11 +273,6 @@ pub impl Scheduler {
         let cleanup_job = self.cleanup_job.swap_unwrap();
         match cleanup_job {
             DoNothing => { }
-            RescheduleTask(task) => {
-                // NB: Pushing to the *front* of the queue
-                self.task_queue.push_front(task);
-            }
-            RecycleTask(task) => task.recycle(&mut self.stack_pool),
             GiveTask(task, f) => (f.to_fn())(task)
         }
     }
@@ -316,8 +290,6 @@ pub impl Scheduler {
                               Option<&'a mut Context>,
                               Option<&'a mut Context>) {
         let last_task = match self.cleanup_job {
-            Some(RescheduleTask(~ref task)) |
-            Some(RecycleTask(~ref task)) |
             Some(GiveTask(~ref task, _)) => {
                 Some(task)
             }
@@ -432,29 +404,6 @@ fn test_several_tasks() {
     }
 }
 
-#[test]
-fn test_swap_tasks() {
-    do run_in_bare_thread {
-        let mut count = 0;
-        let count_ptr: *mut int = &mut count;
-
-        let mut sched = ~UvEventLoop::new_scheduler();
-        let task1 = ~do Task::new(&mut sched.stack_pool) {
-            unsafe { *count_ptr = *count_ptr + 1; }
-            let mut sched = local_sched::take();
-            let task2 = ~do Task::new(&mut sched.stack_pool) {
-                unsafe { *count_ptr = *count_ptr + 1; }
-            };
-            // Context switch directly to the new task
-            sched.resume_task_from_running_task_direct(task2);
-            unsafe { *count_ptr = *count_ptr + 1; }
-        };
-        sched.task_queue.push_back(task1);
-        sched.run();
-        assert!(count == 3);
-    }
-}
-
 #[test]
 fn test_swap_tasks_then() {
     do run_in_bare_thread {
@@ -516,39 +465,6 @@ fn test_run_a_lot_of_tasks_queued() {
     }
 }
 
-#[bench] #[test] #[ignore(reason = "too much stack allocation")]
-fn test_run_a_lot_of_tasks_direct() {
-    do run_in_bare_thread {
-        static MAX: int = 100000;
-        let mut count = 0;
-        let count_ptr: *mut int = &mut count;
-
-        let mut sched = ~UvEventLoop::new_scheduler();
-
-        let start_task = ~do Task::new(&mut sched.stack_pool) {
-            run_task(count_ptr);
-        };
-        sched.task_queue.push_back(start_task);
-        sched.run();
-
-        assert!(count == MAX);
-
-        fn run_task(count_ptr: *mut int) {
-            let mut sched = local_sched::take();
-            let task = ~do Task::new(&mut sched.stack_pool) {
-                unsafe {
-                    *count_ptr = *count_ptr + 1;
-                    if *count_ptr != MAX {
-                        run_task(count_ptr);
-                    }
-                }
-            };
-            // Context switch directly to the new task
-            sched.resume_task_from_running_task_direct(task);
-        };
-    }
-}
-
 #[test]
 fn test_block_task() {
     do run_in_bare_thread {
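
Note on the pattern this diff introduces: the special-cased RescheduleTask and RecycleTask cleanup jobs are replaced by the generic GiveTask job, and callers use deschedule_running_task_and_then / switch_running_tasks_and_then to run a one-shot closure against the task that just stopped running. Because local_sched::borrow only gives the closure by-reference access, the owned ~Task is smuggled into it through a Cell and moved out with take(). Below is a minimal sketch of that hand-off in modern Rust; Task, Scheduler, deschedule_and_then, and borrow_sched are hypothetical stand-ins for illustration, not the runtime's actual API.

use std::cell::Cell;
use std::collections::VecDeque;

struct Task;

struct Scheduler {
    task_queue: VecDeque<Box<Task>>,
}

// Stand-in for `deschedule_running_task_and_then`: hand the task that was
// just descheduled to a one-shot closure.
fn deschedule_and_then(dead_task: Box<Task>, f: impl FnOnce(Box<Task>)) {
    f(dead_task);
}

// Stand-in for `local_sched::borrow`: run a callback that only gets
// by-reference access to the scheduler.
fn borrow_sched(sched: &mut Scheduler, f: impl Fn(&mut Scheduler)) {
    f(sched);
}

fn main() {
    let mut sched = Scheduler { task_queue: VecDeque::new() };

    deschedule_and_then(Box::new(Task), |dead_task| {
        // The inner callback is `Fn`, so it cannot move `dead_task` out of
        // its environment directly; a Cell<Option<_>> lets the value be
        // taken exactly once, mirroring `let dead_task = Cell(dead_task)`
        // in the diff above.
        let dead_task = Cell::new(Some(dead_task));
        borrow_sched(&mut sched, |s| {
            if let Some(t) = dead_task.take() {
                s.task_queue.push_front(t); // or recycle its stack, as in the first hunk
            }
        });
    });

    assert_eq!(sched.task_queue.len(), 1);
}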