1
+ #include < vector>
1
2
#include " rust_internal.h"
2
3
#include " rust_util.h"
3
4
#include " rust_scheduler.h"
@@ -15,6 +16,7 @@ rust_kernel::rust_kernel(rust_srv *srv) :
15
16
max_task_id(0 ),
16
17
rval(0 ),
17
18
live_schedulers(0 ),
19
+ max_sched_id(0 ),
18
20
env(srv->env)
19
21
{
20
22
}
@@ -56,25 +58,45 @@ void rust_kernel::free(void *mem) {
56
58
57
59
// Allocates a fresh scheduler id, constructs a scheduler driving
// `num_threads` task threads, registers it in sched_table under that id,
// and starts its threads.
//
// Returns the id of the newly created scheduler so that callers can
// retrieve it later with get_scheduler_by_id / release it with
// release_scheduler_id.
rust_sched_id
rust_kernel::create_scheduler(size_t num_threads) {
    I(this, !sched_lock.lock_held_by_current_thread());
    rust_scheduler *sched;
    rust_sched_id id;
    {
        scoped_lock with(sched_lock);
        id = max_sched_id++;
        // Guard against wrapping the id space; never hand out INTPTR_MAX.
        K(srv, id != INTPTR_MAX, "Hit the maximum scheduler id");
        sched = new (this, "rust_scheduler")
            rust_scheduler(this, srv, num_threads, id);
        bool is_new = sched_table
            .insert(std::pair<rust_sched_id, rust_scheduler*>(id, sched)).second;
        A(this, is_new, "Reusing a sched id?");
        live_schedulers++;
    }
    // Start the task threads outside sched_lock: thread startup may call
    // back into the kernel, and the assertion above requires the lock to
    // be free on entry to kernel methods.
    sched->start_task_threads();
    // FIX: previously `return 0;` — the freshly allocated id was dropped,
    // so callers could only ever look up the first scheduler. Return the
    // id the scheduler was actually registered under.
    return id;
}
66
77
67
78
// Looks up the scheduler registered under `id`, taking sched_lock for the
// duration of the table access. Returns NULL when no scheduler with that
// id is currently registered.
rust_scheduler *
rust_kernel::get_scheduler_by_id(rust_sched_id id) {
    I(this, !sched_lock.lock_held_by_current_thread());
    scoped_lock with(sched_lock);
    sched_map::iterator pos = sched_table.find(id);
    return (pos == sched_table.end()) ? NULL : pos->second;
}
71
89
72
90
void
73
91
rust_kernel::release_scheduler_id (rust_sched_id id) {
74
92
I (this , !sched_lock.lock_held_by_current_thread ());
75
93
scoped_lock with (sched_lock);
94
+ sched_map::iterator iter = sched_table.find (id);
95
+ I (this , iter != sched_table.end ());
96
+ rust_scheduler *sched = iter->second ;
97
+ sched_table.erase (iter);
76
98
delete sched;
77
- --live_schedulers ;
99
+ live_schedulers-- ;
78
100
if (live_schedulers == 0 ) {
79
101
// We're all done. Tell the main thread to continue
80
102
sched_lock.signal ();
@@ -93,6 +115,7 @@ rust_kernel::wait_for_schedulers()
93
115
return rval;
94
116
}
95
117
118
+ // FIXME: Fix all these FIXMEs
96
119
void
97
120
rust_kernel::fail () {
98
121
// FIXME: On windows we're getting "Application has requested the
@@ -102,7 +125,29 @@ rust_kernel::fail() {
102
125
#if defined(__WIN32__)
103
126
exit (rval);
104
127
#endif
105
- sched->kill_all_tasks ();
128
+ // Copy the list of schedulers so that we don't hold the lock while
129
+ // running kill_all_tasks.
130
+ // FIXME: There's a lot that happens under kill_all_tasks, and I don't
131
+ // know that holding sched_lock here is ok, but we need to hold the
132
+ // sched lock to prevent the scheduler from being destroyed while
133
+ // we are using it. Probably we need to make rust_scheduler atomicly
134
+ // reference counted.
135
+ std::vector<rust_scheduler*> scheds;
136
+ {
137
+ scoped_lock with (sched_lock);
138
+ for (sched_map::iterator iter = sched_table.begin ();
139
+ iter != sched_table.end (); iter++) {
140
+ scheds.push_back (iter->second );
141
+ }
142
+ }
143
+
144
+ // FIXME: This is not a foolproof way to kill all tasks while ensuring
145
+ // that no new tasks or schedulers are created in the meantime that
146
+ // keep the scheduler alive.
147
+ for (std::vector<rust_scheduler*>::iterator iter = scheds.begin ();
148
+ iter != scheds.end (); iter++) {
149
+ (*iter)->kill_all_tasks ();
150
+ }
106
151
}
107
152
108
153
void
0 commit comments