Commit 3208fc3

Merge remote-tracking branch 'brson/io-wip' into io
Conflicts:
	src/libstd/rt/sched.rs
	src/libstd/rt/task.rs
	src/libstd/rt/test.rs
	src/libstd/task/mod.rs
	src/libstd/task/spawn.rs

2 parents: b08c446 + 505ef7e

File tree: 9 files changed, +788 −105 lines

src/libstd/rt/join_latch.rs

Lines changed: 645 additions & 0 deletions (large diff not rendered by default)

src/libstd/rt/metrics.rs

Lines changed: 13 additions & 3 deletions
```diff
@@ -34,7 +34,11 @@ pub struct SchedMetrics {
     // Message receives that do not block the receiver
     rendezvous_recvs: uint,
     // Message receives that block the receiver
-    non_rendezvous_recvs: uint
+    non_rendezvous_recvs: uint,
+    // JoinLatch releases that create tombstones
+    release_tombstone: uint,
+    // JoinLatch releases that do not create tombstones
+    release_no_tombstone: uint,
 }
 
 impl SchedMetrics {
@@ -51,7 +55,9 @@ impl SchedMetrics {
             rendezvous_sends: 0,
             non_rendezvous_sends: 0,
             rendezvous_recvs: 0,
-            non_rendezvous_recvs: 0
+            non_rendezvous_recvs: 0,
+            release_tombstone: 0,
+            release_no_tombstone: 0
         }
     }
 }
@@ -70,6 +76,8 @@ impl ToStr for SchedMetrics {
               non_rendezvous_sends: %u\n\
               rendezvous_recvs: %u\n\
               non_rendezvous_recvs: %u\n\
+              release_tombstone: %u\n\
+              release_no_tombstone: %u\n\
               ",
              self.turns,
              self.messages_received,
@@ -82,7 +90,9 @@ impl ToStr for SchedMetrics {
              self.rendezvous_sends,
              self.non_rendezvous_sends,
              self.rendezvous_recvs,
-             self.non_rendezvous_recvs
+             self.non_rendezvous_recvs,
+             self.release_tombstone,
+             self.release_no_tombstone
             )
     }
 }
```

src/libstd/rt/mod.rs

Lines changed: 5 additions & 2 deletions
```diff
@@ -133,6 +133,9 @@ pub mod local_ptr;
 /// Bindings to pthread/windows thread-local storage.
 pub mod thread_local_storage;
 
+/// A concurrent data structure with which parent tasks wait on child tasks.
+pub mod join_latch;
+
 pub mod metrics;
 
 
@@ -164,7 +167,7 @@ pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int {
     let sleepers = SleeperList::new();
     let mut sched = ~Scheduler::new(loop_, work_queue, sleepers);
     sched.no_sleep = true;
-    let main_task = ~Coroutine::new(&mut sched.stack_pool, main);
+    let main_task = ~Coroutine::new_root(&mut sched.stack_pool, main);
 
     sched.enqueue_task(main_task);
     sched.run();
@@ -238,7 +241,7 @@ fn test_context() {
     do run_in_bare_thread {
         assert_eq!(context(), GlobalContext);
         let mut sched = ~new_test_uv_sched();
-        let task = ~do Coroutine::new(&mut sched.stack_pool) {
+        let task = ~do Coroutine::new_root(&mut sched.stack_pool) {
            assert_eq!(context(), TaskContext);
            let sched = Local::take::<Scheduler>();
            do sched.deschedule_running_task_and_then() |sched, task| {
```

src/libstd/rt/sched.rs

Lines changed: 20 additions & 18 deletions
```diff
@@ -181,8 +181,10 @@ pub impl Scheduler {
         // XXX: Reenable this once we're using a per-task queue. With a shared
         // queue this is not true
         //assert!(sched.work_queue.is_empty());
-        // let out = sched.metrics.to_str();
-        // rtdebug!("scheduler metrics: %s\n", out);
+        rtdebug!("scheduler metrics: %s\n", {
+            use to_str::ToStr;
+            sched.metrics.to_str()
+        });
         return sched;
     }
 
@@ -728,19 +730,19 @@ pub impl Coroutine {
     // using the AnySched paramter.
 
     fn new_homed(stack_pool: &mut StackPool, home: SchedHome, start: ~fn()) -> Coroutine {
-        Coroutine::with_task_homed(stack_pool, ~Task::new(), start, home)
+        Coroutine::with_task_homed(stack_pool, ~Task::new_root(), start, home)
     }
 
-    fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine {
-        Coroutine::with_task(stack_pool, ~Task::new(), start)
+    fn new_root(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine {
+        Coroutine::with_task(stack_pool, ~Task::new_root(), start)
     }
 
     fn with_task_homed(stack_pool: &mut StackPool,
                        task: ~Task,
                        start: ~fn(),
                        home: SchedHome) -> Coroutine {
 
-        static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack
+        static MIN_STACK_SIZE: uint = 1000000; // XXX: Too much stack
 
         let start = Coroutine::build_start_wrapper(start);
         let mut stack = stack_pool.take_segment(MIN_STACK_SIZE);
@@ -930,14 +932,14 @@ mod test {
         };
         let t1f = Cell(t1f);
 
-        let t2f = ~do Coroutine::new(&mut normal_sched.stack_pool) {
+        let t2f = ~do Coroutine::new_root(&mut normal_sched.stack_pool) {
             let on_special = Coroutine::on_special();
             rtdebug!("t2 should not be on special: %b", on_special);
             assert!(!on_special);
         };
         let t2f = Cell(t2f);
 
-        let t3f = ~do Coroutine::new(&mut normal_sched.stack_pool) {
+        let t3f = ~do Coroutine::new_root(&mut normal_sched.stack_pool) {
             // not on special
             let on_special = Coroutine::on_special();
             rtdebug!("t3 should not be on special: %b", on_special);
@@ -986,7 +988,7 @@ mod test {
         let t4 = Cell(t4);
 
         // build a main task that runs our four tests
-        let main_task = ~do Coroutine::new(&mut normal_sched.stack_pool) {
+        let main_task = ~do Coroutine::new_root(&mut normal_sched.stack_pool) {
             // the two tasks that require a normal start location
             t2.take()();
             t4.take()();
@@ -1141,7 +1143,7 @@ mod test {
         let task_ran_ptr: *mut bool = &mut task_ran;
 
         let mut sched = ~new_test_uv_sched();
-        let task = ~do Coroutine::new(&mut sched.stack_pool) {
+        let task = ~do Coroutine::new_root(&mut sched.stack_pool) {
             unsafe { *task_ran_ptr = true; }
         };
         sched.enqueue_task(task);
@@ -1159,7 +1161,7 @@
 
         let mut sched = ~new_test_uv_sched();
         for int::range(0, total) |_| {
-            let task = ~do Coroutine::new(&mut sched.stack_pool) {
+            let task = ~do Coroutine::new_root(&mut sched.stack_pool) {
                 unsafe { *task_count_ptr = *task_count_ptr + 1; }
             };
             sched.enqueue_task(task);
@@ -1176,10 +1178,10 @@
         let count_ptr: *mut int = &mut count;
 
         let mut sched = ~new_test_uv_sched();
-        let task1 = ~do Coroutine::new(&mut sched.stack_pool) {
+        let task1 = ~do Coroutine::new_root(&mut sched.stack_pool) {
             unsafe { *count_ptr = *count_ptr + 1; }
             let mut sched = Local::take::<Scheduler>();
-            let task2 = ~do Coroutine::new(&mut sched.stack_pool) {
+            let task2 = ~do Coroutine::new_root(&mut sched.stack_pool) {
                 unsafe { *count_ptr = *count_ptr + 1; }
             };
             // Context switch directly to the new task
@@ -1204,7 +1206,7 @@
 
         let mut sched = ~new_test_uv_sched();
 
-        let start_task = ~do Coroutine::new(&mut sched.stack_pool) {
+        let start_task = ~do Coroutine::new_root(&mut sched.stack_pool) {
             run_task(count_ptr);
         };
         sched.enqueue_task(start_task);
@@ -1214,7 +1216,7 @@
 
     fn run_task(count_ptr: *mut int) {
         do Local::borrow::<Scheduler, ()> |sched| {
-            let task = ~do Coroutine::new(&mut sched.stack_pool) {
+            let task = ~do Coroutine::new_root(&mut sched.stack_pool) {
                 unsafe {
                     *count_ptr = *count_ptr + 1;
                     if *count_ptr != MAX {
@@ -1232,7 +1234,7 @@
     fn test_block_task() {
        do run_in_bare_thread {
            let mut sched = ~new_test_uv_sched();
-           let task = ~do Coroutine::new(&mut sched.stack_pool) {
+           let task = ~do Coroutine::new_root(&mut sched.stack_pool) {
                let sched = Local::take::<Scheduler>();
                assert!(sched.in_task_context());
                do sched.deschedule_running_task_and_then() |sched, task| {
@@ -1279,13 +1281,13 @@
         let mut sched1 = ~new_test_uv_sched();
         let handle1 = sched1.make_handle();
         let handle1_cell = Cell(handle1);
-        let task1 = ~do Coroutine::new(&mut sched1.stack_pool) {
+        let task1 = ~do Coroutine::new_root(&mut sched1.stack_pool) {
             chan_cell.take().send(());
         };
         sched1.enqueue_task(task1);
 
         let mut sched2 = ~new_test_uv_sched();
-        let task2 = ~do Coroutine::new(&mut sched2.stack_pool) {
+        let task2 = ~do Coroutine::new_root(&mut sched2.stack_pool) {
             port_cell.take().recv();
             // Release the other scheduler's handle so it can exit
             handle1_cell.take();
```

src/libstd/rt/task.rs

Lines changed: 41 additions & 19 deletions
```diff
@@ -16,19 +16,23 @@
 use prelude::*;
 use libc::{c_void, uintptr_t};
 use cast::transmute;
+use option::{Option, Some, None};
 use rt::local::Local;
 use super::local_heap::LocalHeap;
 use rt::logging::StdErrLogger;
 use rt::sched::{SchedHome, AnySched};
+use rt::join_latch::JoinLatch;
 
 pub struct Task {
     heap: LocalHeap,
     gc: GarbageCollector,
     storage: LocalStorage,
     logger: StdErrLogger,
-    unwinder: Option<Unwinder>,
-    destroyed: bool,
-    home: Option<SchedHome>
+    unwinder: Unwinder,
+    home: Option<SchedHome>,
+    join_latch: Option<~JoinLatch>,
+    on_exit: Option<~fn(bool)>,
+    destroyed: bool
 }
 
 pub struct GarbageCollector;
@@ -39,27 +43,31 @@ pub struct Unwinder {
 }
 
 impl Task {
-    pub fn new() -> Task {
+    pub fn new_root() -> Task {
         Task {
             heap: LocalHeap::new(),
             gc: GarbageCollector,
             storage: LocalStorage(ptr::null(), None),
             logger: StdErrLogger,
-            unwinder: Some(Unwinder { unwinding: false }),
-            destroyed: false,
-            home: Some(AnySched)
+            unwinder: Unwinder { unwinding: false },
+            home: Some(AnySched),
+            join_latch: Some(JoinLatch::new_root()),
+            on_exit: None,
+            destroyed: false
         }
     }
 
-    pub fn without_unwinding() -> Task {
+    pub fn new_child(&mut self) -> Task {
         Task {
             heap: LocalHeap::new(),
             gc: GarbageCollector,
             storage: LocalStorage(ptr::null(), None),
             logger: StdErrLogger,
-            unwinder: None,
-            destroyed: false,
-            home: Some(AnySched)
+            home: Some(AnySched),
+            unwinder: Unwinder { unwinding: false },
+            join_latch: Some(self.join_latch.get_mut_ref().new_child()),
+            on_exit: None,
+            destroyed: false
         }
     }
 
@@ -74,20 +82,24 @@ impl Task {
             assert!(ptr::ref_eq(task, self));
         }
 
-        match self.unwinder {
-            Some(ref mut unwinder) => {
-                // If there's an unwinder then set up the catch block
-                unwinder.try(f);
+        self.unwinder.try(f);
+        self.destroy();
+
+        // Wait for children. Possibly report the exit status.
+        let local_success = !self.unwinder.unwinding;
+        let join_latch = self.join_latch.swap_unwrap();
+        match self.on_exit {
+            Some(ref on_exit) => {
+                let success = join_latch.wait(local_success);
+                (*on_exit)(success);
             }
             None => {
-                // Otherwise, just run the body
-                f()
+                join_latch.release(local_success);
             }
         }
-        self.destroy();
     }
 
-    /// Must be called manually before finalization to clean up
+    /// must be called manually before finalization to clean up
     /// thread-local resources. Some of the routines here expect
     /// Task to be available recursively so this must be
    /// called unsafely, without removing Task from
@@ -233,5 +245,15 @@ mod test {
             assert!(port.recv() == 10);
         }
     }
+
+    #[test]
+    fn linked_failure() {
+        do run_in_newsched_task() {
+            let res = do spawntask_try {
+                spawntask_random(|| fail!());
+            };
+            assert!(res.is_err());
+        }
+    }
 }
 
```