Commit d261bb3

core: More tweaks to the thread-local scheduler interface
1 parent 15ece0c commit d261bb3

5 files changed, +125 -123 lines changed
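
In short, the thread-local scheduler helpers move off `Scheduler` and into the module renamed to `local_sched`, with the aliasing borrow now explicitly marked unsafe. A rough old-to-new mapping, inferred from the hunks below (only three of the five changed files appear in this excerpt, so it may not be exhaustive):

    Scheduler::have_local()       ->  local_sched::exists()
    Scheduler::take_local()       ->  local_sched::take()
    Scheduler::borrow_local(f)    ->  local_sched::borrow(f)
    local::borrow() (unsafe)      ->  local_sched::unsafe_borrow()
    Scheduler::borrow_local_io()  ->  local_sched::unsafe_borrow_io()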

src/libcore/rt/mod.rs

Lines changed: 7 additions & 7 deletions
@@ -106,16 +106,16 @@ pub enum RuntimeContext {
 pub fn context() -> RuntimeContext {
 
     use task::rt::rust_task;
-    use self::sched::Scheduler;
+    use self::sched::local_sched;
 
     // XXX: Hitting TLS twice to check if the scheduler exists
     // then to check for the task is not good for perf
     if unsafe { rust_try_get_task().is_not_null() } {
         return OldTaskContext;
     } else {
-        if Scheduler::have_local() {
+        if local_sched::exists() {
             let context = ::cell::empty_cell();
-            do Scheduler::borrow_local |sched| {
+            do local_sched::borrow |sched| {
                 if sched.in_task_context() {
                     context.put_back(TaskContext);
                 } else {
@@ -137,7 +137,7 @@ pub fn context() -> RuntimeContext {
 #[test]
 fn test_context() {
     use unstable::run_in_bare_thread;
-    use self::sched::{Scheduler, Task};
+    use self::sched::{local_sched, Task};
     use self::uvio::UvEventLoop;
     use cell::Cell;
 
@@ -147,11 +147,11 @@ fn test_context() {
         let mut sched = ~UvEventLoop::new_scheduler();
         let task = ~do Task::new(&mut sched.stack_pool) {
             assert!(context() == TaskContext);
-            let sched = Scheduler::take_local();
+            let sched = local_sched::take();
             do sched.deschedule_running_task_and_then() |task| {
                 assert!(context() == SchedulerContext);
                 let task = Cell(task);
-                do Scheduler::borrow_local |sched| {
+                do local_sched::borrow |sched| {
                     sched.task_queue.push_back(task.take());
                 }
             }
@@ -166,7 +166,7 @@ fn test_context() {
 pub fn run_in_newsched_task(f: ~fn()) {
     use cell::Cell;
     use unstable::run_in_bare_thread;
-    use self::sched::{Scheduler, Task};
+    use self::sched::Task;
     use self::uvio::UvEventLoop;
 
     let f = Cell(Cell(f));
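
Outside the runtime internals, the visible effect of this file's change is that callers reach the thread-local scheduler through free functions in `local_sched` rather than static methods on `Scheduler`. A minimal sketch of the new call pattern, using only functions that appear in this diff; `log_current_context` itself is a hypothetical caller, not part of the commit:

    // Sketch only: a hypothetical caller of the new local_sched interface.
    fn log_current_context() {
        // Previously: Scheduler::have_local() / do Scheduler::borrow_local |sched| { ... }
        if local_sched::exists() {
            // `borrow` takes the scheduler out of TLS, runs the closure, then puts it back.
            do local_sched::borrow |sched| {
                if sched.in_task_context() {
                    rtdebug!("in task context");
                } else {
                    rtdebug!("in scheduler context");
                }
            }
        }
    }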

src/libcore/rt/sched/local.rs renamed to src/libcore/rt/sched/local_sched.rs

Lines changed: 20 additions & 2 deletions
@@ -16,6 +16,7 @@ use libc::c_void;
 use cast::transmute;
 
 use super::Scheduler;
+use super::super::rtio::IoFactoryObject;
 use tls = super::super::thread_local_storage;
 #[cfg(test)] use super::super::uvio::UvEventLoop;
 
@@ -50,11 +51,21 @@ pub fn exists() -> bool {
     }
 }
 
+/// Borrow the thread-local scheduler from thread-local storage.
+/// While the scheduler is borrowed it is not available in TLS.
+pub fn borrow(f: &fn(&mut Scheduler)) {
+    let mut sched = take();
+    f(sched);
+    put(sched);
+}
+
 /// Borrow a mutable reference to the thread-local Scheduler
+///
 /// # Safety Note
+///
 /// Because this leaves the Scheduler in thread-local storage it is possible
 /// For the Scheduler pointer to be aliased
-pub unsafe fn borrow() -> &mut Scheduler {
+pub unsafe fn unsafe_borrow() -> &mut Scheduler {
     unsafe {
         let key = tls_key();
         let mut void_sched: *mut c_void = tls::get(key);
@@ -70,6 +81,13 @@ pub unsafe fn borrow() -> &mut Scheduler {
     }
 }
 
+pub unsafe fn unsafe_borrow_io() -> &mut IoFactoryObject {
+    unsafe {
+        let sched = unsafe_borrow();
+        return sched.event_loop.io().unwrap();
+    }
+}
+
 fn tls_key() -> tls::Key {
     maybe_tls_key().get()
 }
@@ -125,7 +143,7 @@ fn borrow_smoke_test() {
     let scheduler = ~UvEventLoop::new_scheduler();
     put(scheduler);
     unsafe {
-        let _scheduler = borrow();
+        let _scheduler = unsafe_borrow();
     }
     let _scheduler = take();
 }
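
The renamed module now distinguishes two ways to reach the scheduler: `borrow` removes it from TLS for the duration of the closure (so no second alias can exist), while `unsafe_borrow` leaves it in TLS, which is why it carries the `unsafe_` prefix and the aliasing caveat in its doc comment. A small sketch contrasting the two, assuming a scheduler has already been `put` into TLS and that `task` is some ready task from the surrounding code:

    // Safe form: the scheduler leaves TLS while borrowed, then is put back.
    do local_sched::borrow |sched| {
        sched.task_queue.push_back(task);
    }

    // Unsafe form: the scheduler stays in TLS, so the caller must rule out other aliases.
    unsafe {
        let sched = local_sched::unsafe_borrow();
        sched.run_cleanup_job();
    }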

src/libcore/rt/sched/mod.rs

Lines changed: 61 additions & 81 deletions
@@ -14,15 +14,16 @@ use cast::transmute;
 
 use super::work_queue::WorkQueue;
 use super::stack::{StackPool, StackSegment};
-use super::rtio::{EventLoop, EventLoopObject, IoFactoryObject};
+use super::rtio::{EventLoop, EventLoopObject};
 use super::context::Context;
 
 #[cfg(test)] use super::uvio::UvEventLoop;
 #[cfg(test)] use unstable::run_in_bare_thread;
 #[cfg(test)] use int;
 #[cfg(test)] use cell::Cell;
 
-mod local;
+// A more convenient name for external callers, e.g. `local_sched::take()`
+pub mod local_sched;
 
 /// The Scheduler is responsible for coordinating execution of Tasks
 /// on a single thread. When the scheduler is running it is owned by
@@ -90,52 +91,25 @@ pub impl Scheduler {
         assert!(!self.in_task_context());
 
         // Give ownership of the scheduler (self) to the thread
-        local::put(self);
-
-        let scheduler = unsafe { local::borrow() };
-        fn run_scheduler_once() {
-            let scheduler = Scheduler::take_local();
-            if scheduler.resume_task_from_queue() {
-                // Ok, a task ran. Nice! We'll do it again later
-                do Scheduler::borrow_local |scheduler| {
-                    scheduler.event_loop.callback(run_scheduler_once);
+        local_sched::put(self);
+
+        unsafe {
+            let scheduler = local_sched::unsafe_borrow();
+            fn run_scheduler_once() {
+                let scheduler = local_sched::take();
+                if scheduler.resume_task_from_queue() {
+                    // Ok, a task ran. Nice! We'll do it again later
+                    do local_sched::borrow |scheduler| {
+                        scheduler.event_loop.callback(run_scheduler_once);
+                    }
                 }
             }
-        }
-
-        scheduler.event_loop.callback(run_scheduler_once);
-        scheduler.event_loop.run();
-
-        return local::take();
-    }
 
-    /// Get a mutable pointer to the thread-local I/O
-    /// # Safety Note
-    /// This allows other mutable aliases to the scheduler, both in the current
-    /// execution context and other execution contexts.
-    unsafe fn borrow_local_io() -> &mut IoFactoryObject {
-        unsafe {
-            let io = local::borrow().event_loop.io().unwrap();
-            transmute::<&mut IoFactoryObject, &mut IoFactoryObject>(io)
+            scheduler.event_loop.callback(run_scheduler_once);
+            scheduler.event_loop.run();
         }
-    }
-
-    /// Borrow the thread-local scheduler from thread-local storage.
-    /// While the scheduler is borrowed it is not available in TLS.
-    fn borrow_local(f: &fn(&mut Scheduler)) {
-        let mut sched = local::take();
-        f(sched);
-        local::put(sched);
-    }
-
-    /// Take ownership of the scheduler from thread local storage
-    fn take_local() -> ~Scheduler {
-        local::take()
-    }
 
-    /// Just check whether there is a local scheduler
-    fn have_local() -> bool {
-        local::exists()
+        return local_sched::take();
     }
 
     // * Scheduler-context operations
@@ -151,7 +125,7 @@ pub impl Scheduler {
             }
             None => {
                 rtdebug!("no tasks in queue");
-                local::put(self);
+                local_sched::put(self);
                 return false;
             }
         }
@@ -167,22 +141,24 @@ pub impl Scheduler {
         self.current_task = Some(task);
         self.enqueue_cleanup_job(DoNothing);
 
-        local::put(self);
+        local_sched::put(self);
 
         // Take pointers to both the task and scheduler's saved registers.
-        let sched = unsafe { local::borrow() };
-        let (sched_context, _, next_task_context) = sched.get_contexts();
-        let next_task_context = next_task_context.unwrap();
-        // Context switch to the task, restoring it's registers
-        // and saving the scheduler's
-        Context::swap(sched_context, next_task_context);
-
-        let sched = unsafe { local::borrow() };
-        // The running task should have passed ownership elsewhere
-        assert!(sched.current_task.is_none());
-
-        // Running tasks may have asked us to do some cleanup
-        sched.run_cleanup_job();
+        unsafe {
+            let sched = local_sched::unsafe_borrow();
+            let (sched_context, _, next_task_context) = sched.get_contexts();
+            let next_task_context = next_task_context.unwrap();
+            // Context switch to the task, restoring it's registers
+            // and saving the scheduler's
+            Context::swap(sched_context, next_task_context);
+
+            let sched = local_sched::unsafe_borrow();
+            // The running task should have passed ownership elsewhere
+            assert!(sched.current_task.is_none());
+
+            // Running tasks may have asked us to do some cleanup
+            sched.run_cleanup_job();
+        }
     }
 
 
@@ -199,9 +175,9 @@ pub impl Scheduler {
         let dead_task = self.current_task.swap_unwrap();
         self.enqueue_cleanup_job(RecycleTask(dead_task));
 
-        local::put(self);
+        local_sched::put(self);
 
-        let sched = unsafe { local::borrow() };
+        let sched = unsafe { local_sched::unsafe_borrow() };
         let (sched_context, last_task_context, _) = sched.get_contexts();
         let last_task_context = last_task_context.unwrap();
         Context::swap(last_task_context, sched_context);
@@ -228,15 +204,15 @@ pub impl Scheduler {
         let f_opaque = ClosureConverter::from_fn(f_fake_region);
         self.enqueue_cleanup_job(GiveTask(blocked_task, f_opaque));
 
-        local::put(self);
+        local_sched::put(self);
 
-        let sched = unsafe { local::borrow() };
+        let sched = unsafe { local_sched::unsafe_borrow() };
         let (sched_context, last_task_context, _) = sched.get_contexts();
         let last_task_context = last_task_context.unwrap();
         Context::swap(last_task_context, sched_context);
 
         // We could be executing in a different thread now
-        let sched = unsafe { local::borrow() };
+        let sched = unsafe { local_sched::unsafe_borrow() };
         sched.run_cleanup_job();
     }
 
@@ -253,17 +229,19 @@ pub impl Scheduler {
         self.enqueue_cleanup_job(RescheduleTask(old_running_task));
         self.current_task = Some(next_task);
 
-        local::put(self);
+        local_sched::put(self);
 
-        let sched = unsafe { local::borrow() };
-        let (_, last_task_context, next_task_context) = sched.get_contexts();
-        let last_task_context = last_task_context.unwrap();
-        let next_task_context = next_task_context.unwrap();
-        Context::swap(last_task_context, next_task_context);
-
-        // We could be executing in a different thread now
-        let sched = unsafe { local::borrow() };
-        sched.run_cleanup_job();
+        unsafe {
+            let sched = local_sched::unsafe_borrow();
+            let (_, last_task_context, next_task_context) = sched.get_contexts();
+            let last_task_context = last_task_context.unwrap();
+            let next_task_context = next_task_context.unwrap();
+            Context::swap(last_task_context, next_task_context);
+
+            // We could be executing in a different thread now
+            let sched = local_sched::unsafe_borrow();
+            sched.run_cleanup_job();
+        }
     }
 
     // * Other stuff
@@ -363,12 +341,14 @@ pub impl Task {
             // This is the first code to execute after the initial
             // context switch to the task. The previous context may
            // have asked us to do some cleanup.
-            let sched = unsafe { local::borrow() };
-            sched.run_cleanup_job();
+            unsafe {
+                let sched = local_sched::unsafe_borrow();
+                sched.run_cleanup_job();
+            }
 
             start();
 
-            let sched = Scheduler::take_local();
+            let sched = local_sched::take();
             sched.terminate_current_task();
         };
         return wrapper;
@@ -428,7 +408,7 @@ fn test_swap_tasks() {
         let mut sched = ~UvEventLoop::new_scheduler();
         let task1 = ~do Task::new(&mut sched.stack_pool) {
             unsafe { *count_ptr = *count_ptr + 1; }
-            let mut sched = Scheduler::take_local();
+            let mut sched = local_sched::take();
             let task2 = ~do Task::new(&mut sched.stack_pool) {
                 unsafe { *count_ptr = *count_ptr + 1; }
             };
@@ -460,7 +440,7 @@ fn test_run_a_lot_of_tasks_queued() {
         assert!(count == MAX);
 
         fn run_task(count_ptr: *mut int) {
-            do Scheduler::borrow_local |sched| {
+            do local_sched::borrow |sched| {
                 let task = ~do Task::new(&mut sched.stack_pool) {
                     unsafe {
                         *count_ptr = *count_ptr + 1;
@@ -493,7 +473,7 @@ fn test_run_a_lot_of_tasks_direct() {
         assert!(count == MAX);
 
         fn run_task(count_ptr: *mut int) {
-            let mut sched = Scheduler::take_local();
+            let mut sched = local_sched::take();
             let task = ~do Task::new(&mut sched.stack_pool) {
                 unsafe {
                     *count_ptr = *count_ptr + 1;
@@ -513,11 +493,11 @@ fn test_block_task() {
     do run_in_bare_thread {
         let mut sched = ~UvEventLoop::new_scheduler();
         let task = ~do Task::new(&mut sched.stack_pool) {
-            let sched = Scheduler::take_local();
+            let sched = local_sched::take();
             assert!(sched.in_task_context());
             do sched.deschedule_running_task_and_then() |task| {
                 let task = Cell(task);
-                do Scheduler::borrow_local |sched| {
+                do local_sched::borrow |sched| {
                     assert!(!sched.in_task_context());
                     sched.task_queue.push_back(task.take());
                 }
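
Taken together, the updated tests show the typical shape of the new interface from inside a task: take the scheduler out of TLS to deschedule, then borrow it back briefly to requeue the blocked task. A condensed sketch modeled on `test_block_task` above; the trailing enqueue-and-run lines follow the same test's structure and are assumptions here rather than part of the shown hunks:

    let mut sched = ~UvEventLoop::new_scheduler();
    let task = ~do Task::new(&mut sched.stack_pool) {
        // Inside the task: take ownership of the scheduler out of TLS...
        let sched = local_sched::take();
        do sched.deschedule_running_task_and_then() |task| {
            // ...then, in scheduler context, borrow it briefly to requeue this task.
            let task = Cell(task);
            do local_sched::borrow |sched| {
                sched.task_queue.push_back(task.take());
            }
        }
    };
    sched.task_queue.push_back(task);
    sched.run();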
