@@ -26,20 +26,20 @@ use cell::Cell;
// A more convenient name for external callers, e.g. `local_sched::take()`
pub mod local_sched;

-/// The Scheduler is responsible for coordinating execution of Tasks
+/// The Scheduler is responsible for coordinating execution of Coroutines
/// on a single thread. When the scheduler is running it is owned by
/// thread local storage and the running task is owned by the
/// scheduler.
pub struct Scheduler {
-    priv work_queue: WorkQueue<~Task>,
+    priv work_queue: WorkQueue<~Coroutine>,
    stack_pool: StackPool,
    /// The event loop used to drive the scheduler and perform I/O
    event_loop: ~EventLoopObject,
    /// The scheduler's saved context.
    /// Always valid when a task is executing, otherwise not
    priv saved_context: Context,
    /// The currently executing task
-    current_task: Option<~Task>,
+    current_task: Option<~Coroutine>,
    /// An action performed after a context switch on behalf of the
    /// code running before the context switch
    priv cleanup_job: Option<CleanupJob>
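
The ownership rule in the doc comment above is load-bearing: while a coroutine runs, the scheduler lives in thread local storage, and all access goes through the `local_sched` module declared above. A minimal sketch of the two access patterns the tests below rely on (`local_sched::put` is an assumption here, taken to be the counterpart of `take`; it does not appear in this diff):

    // Exclusive access: pull the ~Scheduler out of TLS, use it, return it.
    let sched = local_sched::take();
    assert!(sched.in_task_context());
    local_sched::put(sched); // assumed counterpart of `take`

    // Scoped access: borrow the scheduler in place, as run_task() does below.
    do local_sched::borrow |sched| {
        let task = ~do Coroutine::new(&mut sched.stack_pool) { };
        sched.enqueue_task(task);
    }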
@@ -49,17 +49,17 @@ pub struct Scheduler {
// complaining
type UnsafeTaskReceiver = sys::Closure;
trait ClosureConverter {
-    fn from_fn(&fn(~Task)) -> Self;
-    fn to_fn(self) -> &fn(~Task);
+    fn from_fn(&fn(~Coroutine)) -> Self;
+    fn to_fn(self) -> &fn(~Coroutine);
}
impl ClosureConverter for UnsafeTaskReceiver {
-    fn from_fn(f: &fn(~Task)) -> UnsafeTaskReceiver { unsafe { transmute(f) } }
-    fn to_fn(self) -> &fn(~Task) { unsafe { transmute(self) } }
+    fn from_fn(f: &fn(~Coroutine)) -> UnsafeTaskReceiver { unsafe { transmute(f) } }
+    fn to_fn(self) -> &fn(~Coroutine) { unsafe { transmute(self) } }
}

enum CleanupJob {
    DoNothing,
-    GiveTask(~Task, UnsafeTaskReceiver)
+    GiveTask(~Coroutine, UnsafeTaskReceiver)
}

pub impl Scheduler {
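
The `ClosureConverter` machinery above exists because a cleanup job has to carry a borrowed stack closure across a context switch, and no lifetime the borrow checker accepts can describe that (hence the truncated "complaining" comment). The closure is erased to the opaque `sys::Closure` representation and recovered on the other side. A sketch of the round trip, sound only because the scheduler runs the cleanup job before the blocked task's stack frame disappears:

    let f: &fn(~Coroutine) = |task| { let _ = task; };
    let opaque: UnsafeTaskReceiver = ClosureConverter::from_fn(f);
    // ... context switch; the frame that owns `f` is still alive ...
    let f_again: &fn(~Coroutine) = opaque.to_fn();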
@@ -115,7 +115,7 @@ pub impl Scheduler {
    /// Pushes the task onto the work stealing queue and tells the event loop
    /// to run it later. Always use this instead of pushing to the work queue
    /// directly.
-    fn enqueue_task(&mut self, task: ~Task) {
+    fn enqueue_task(&mut self, task: ~Coroutine) {
        self.work_queue.push_front(task);
        self.event_loop.callback(resume_task_from_queue);
    }
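
Going through `enqueue_task` matters because it pairs the queue push with an event loop callback; a coroutine pushed onto `work_queue` directly would sit there with nothing scheduled to resume it. The caller-side shape, taken from the tests below (`sched.run()` is assumed here; that method is outside this diff):

    let mut sched = ~UvEventLoop::new_scheduler();
    let task = ~do Coroutine::new(&mut sched.stack_pool) {
        rtdebug!("resumed from the work queue");
    };
    sched.enqueue_task(task);
    sched.run(); // assumed entry point that drives the event loop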
@@ -164,7 +164,7 @@ pub impl Scheduler {
        abort!("control reached end of task");
    }

-    fn schedule_new_task(~self, task: ~Task) {
+    fn schedule_new_task(~self, task: ~Coroutine) {
        assert!(self.in_task_context());

        do self.switch_running_tasks_and_then(task) |last_task| {
@@ -177,7 +177,7 @@ pub impl Scheduler {

    // Core scheduling ops

-    fn resume_task_immediately(~self, task: ~Task) {
+    fn resume_task_immediately(~self, task: ~Coroutine) {
        let mut this = self;
        assert!(!this.in_task_context());
@@ -215,15 +215,15 @@ pub impl Scheduler {
    /// The closure here is a *stack* closure that lives in the
    /// running task. It gets transmuted to the scheduler's lifetime
    /// and called while the task is blocked.
-    fn deschedule_running_task_and_then(~self, f: &fn(~Task)) {
+    fn deschedule_running_task_and_then(~self, f: &fn(~Coroutine)) {
        let mut this = self;
        assert!(this.in_task_context());

        rtdebug!("blocking task");

        unsafe {
            let blocked_task = this.current_task.swap_unwrap();
-            let f_fake_region = transmute::<&fn(~Task), &fn(~Task)>(f);
+            let f_fake_region = transmute::<&fn(~Coroutine), &fn(~Coroutine)>(f);
            let f_opaque = ClosureConverter::from_fn(f_fake_region);
            this.enqueue_cleanup_job(GiveTask(blocked_task, f_opaque));
        }
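
The call site makes the contract clearer: `f` receives the now-blocked coroutine, and whoever ends up holding it must eventually wake it with `enqueue_task`. A sketch based on `test_block_task` below; the `Cell` (imported at the top of this file) smuggles the coroutine out of the stack closure, which cannot move its captures:

    let sched = local_sched::take();
    do sched.deschedule_running_task_and_then() |task| {
        // `task` is the blocked coroutine; re-enqueue it so it resumes later.
        let task = Cell(task);
        do local_sched::borrow |sched| {
            sched.enqueue_task(task.take());
        }
    }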
@@ -245,14 +245,14 @@ pub impl Scheduler {
    /// Switch directly to another task, without going through the scheduler.
    /// You would want to think hard about doing this, e.g. if there are
    /// pending I/O events it would be a bad idea.
-    fn switch_running_tasks_and_then(~self, next_task: ~Task, f: &fn(~Task)) {
+    fn switch_running_tasks_and_then(~self, next_task: ~Coroutine, f: &fn(~Coroutine)) {
        let mut this = self;
        assert!(this.in_task_context());

        rtdebug!("switching tasks");

        let old_running_task = this.current_task.swap_unwrap();
-        let f_fake_region = unsafe { transmute::<&fn(~Task), &fn(~Task)>(f) };
+        let f_fake_region = unsafe { transmute::<&fn(~Coroutine), &fn(~Coroutine)>(f) };
        let f_opaque = ClosureConverter::from_fn(f_fake_region);
        this.enqueue_cleanup_job(GiveTask(old_running_task, f_opaque));
        this.current_task = Some(next_task);
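
This is the direct-transfer counterpart of descheduling: the displaced coroutine goes into the cleanup job rather than the work queue, and `next_task` starts running without a round trip through the event loop. The swap test below uses it roughly like this, re-enqueueing the displaced coroutine so it is not lost:

    do sched.switch_running_tasks_and_then(task2) |task1| {
        // `task1` was running a moment ago; queue it to finish later.
        let task1 = Cell(task1);
        do local_sched::borrow |sched| {
            sched.enqueue_task(task1.take());
        }
    }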
@@ -318,7 +318,7 @@ pub impl Scheduler {
        // because borrowck thinks the three patterns are conflicting
        // borrows
        unsafe {
-            let last_task = transmute::<Option<&Task>, Option<&mut Task>>(last_task);
+            let last_task = transmute::<Option<&Coroutine>, Option<&mut Coroutine>>(last_task);
            let last_task_context = match last_task {
                Some(t) => Some(&mut t.saved_context), None => None
            };
@@ -333,9 +333,9 @@ pub impl Scheduler {
    }
}

-static TASK_MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack
+static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack

-pub struct Task {
+pub struct Coroutine {
    /// The segment of stack on which the task is currently running or,
    /// if the task is blocked, on which the task will resume execution
    priv current_stack_segment: StackSegment,
@@ -346,19 +346,19 @@ pub struct Task {
    local_services: LocalServices
}

-pub impl Task {
-    fn new(stack_pool: &mut StackPool, start: ~fn()) -> Task {
-        Task::with_local(stack_pool, LocalServices::new(), start)
+pub impl Coroutine {
+    fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine {
+        Coroutine::with_local(stack_pool, LocalServices::new(), start)
    }

    fn with_local(stack_pool: &mut StackPool,
                  local_services: LocalServices,
-                  start: ~fn()) -> Task {
-        let start = Task::build_start_wrapper(start);
-        let mut stack = stack_pool.take_segment(TASK_MIN_STACK_SIZE);
+                  start: ~fn()) -> Coroutine {
+        let start = Coroutine::build_start_wrapper(start);
+        let mut stack = stack_pool.take_segment(MIN_STACK_SIZE);
        // NB: Context holds a pointer to that ~fn
        let initial_context = Context::new(start, &mut stack);
-        return Task {
+        return Coroutine {
            current_stack_segment: stack,
            saved_context: initial_context,
            local_services: local_services
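
`new` is just `with_local` with a fresh set of `LocalServices`: `with_local` takes a segment from the pool, wraps the start closure via `build_start_wrapper`, and points the initial `Context` at the wrapper, so the first resume lands on the coroutine's own stack. Construction in the style of the tests below; note each segment is a full `MIN_STACK_SIZE` (10000000 bytes, flagged XXX above as too much):

    let mut sched = ~UvEventLoop::new_scheduler();
    let co = ~do Coroutine::new(&mut sched.stack_pool) {
        rtdebug!("first resume lands here");
    };
    // co.saved_context now targets the wrapped start fn on its own segment.
    sched.enqueue_task(co);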
@@ -390,7 +390,7 @@ pub impl Task {
    /// Destroy the task and try to reuse its components
    fn recycle(~self, stack_pool: &mut StackPool) {
        match self {
-            ~Task { current_stack_segment, _ } => {
+            ~Coroutine { current_stack_segment, _ } => {
                stack_pool.give_segment(current_stack_segment);
            }
        }
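
The match arm is the era's idiom for moving a field out of an owned box: destructuring `~Coroutine { current_stack_segment, _ }` consumes the coroutine and takes ownership of only the segment, so the expensive stack returns to the pool instead of the allocator. A lifecycle sketch with hypothetical sequencing (in reality the scheduler owns the coroutine while it runs):

    let co = ~do Coroutine::new(&mut sched.stack_pool) { };  // takes a segment
    // ... suppose `co` has run to completion and been handed back ...
    co.recycle(&mut sched.stack_pool);  // the segment goes back to the pool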
@@ -414,7 +414,7 @@ mod test {
            let task_ran_ptr: *mut bool = &mut task_ran;

            let mut sched = ~UvEventLoop::new_scheduler();
-            let task = ~do Task::new(&mut sched.stack_pool) {
+            let task = ~do Coroutine::new(&mut sched.stack_pool) {
                unsafe { *task_ran_ptr = true; }
            };
            sched.enqueue_task(task);
@@ -432,7 +432,7 @@ mod test {

            let mut sched = ~UvEventLoop::new_scheduler();
            for int::range(0, total) |_| {
-                let task = ~do Task::new(&mut sched.stack_pool) {
+                let task = ~do Coroutine::new(&mut sched.stack_pool) {
                    unsafe { *task_count_ptr = *task_count_ptr + 1; }
                };
                sched.enqueue_task(task);
@@ -449,10 +449,10 @@ mod test {
            let count_ptr: *mut int = &mut count;

            let mut sched = ~UvEventLoop::new_scheduler();
-            let task1 = ~do Task::new(&mut sched.stack_pool) {
+            let task1 = ~do Coroutine::new(&mut sched.stack_pool) {
                unsafe { *count_ptr = *count_ptr + 1; }
                let mut sched = local_sched::take();
-                let task2 = ~do Task::new(&mut sched.stack_pool) {
+                let task2 = ~do Coroutine::new(&mut sched.stack_pool) {
                    unsafe { *count_ptr = *count_ptr + 1; }
                };
                // Context switch directly to the new task
@@ -479,7 +479,7 @@ mod test {

            let mut sched = ~UvEventLoop::new_scheduler();

-            let start_task = ~do Task::new(&mut sched.stack_pool) {
+            let start_task = ~do Coroutine::new(&mut sched.stack_pool) {
                run_task(count_ptr);
            };
            sched.enqueue_task(start_task);
@@ -489,7 +489,7 @@ mod test {

        fn run_task(count_ptr: *mut int) {
            do local_sched::borrow |sched| {
-                let task = ~do Task::new(&mut sched.stack_pool) {
+                let task = ~do Coroutine::new(&mut sched.stack_pool) {
                    unsafe {
                        *count_ptr = *count_ptr + 1;
                        if *count_ptr != MAX {
@@ -507,7 +507,7 @@ mod test {
    fn test_block_task() {
        do run_in_bare_thread {
            let mut sched = ~UvEventLoop::new_scheduler();
-            let task = ~do Task::new(&mut sched.stack_pool) {
+            let task = ~do Coroutine::new(&mut sched.stack_pool) {
                let sched = local_sched::take();
                assert!(sched.in_task_context());
                do sched.deschedule_running_task_and_then() |task| {