Skip to content

Commit 28bd6a1

Browse files
Eric Holk authored and graydon committed
---
yaml --- r: 3427 b: refs/heads/master c: f6f945f h: refs/heads/master i: 3425: eeada19 3423: 8f5d9dc v: v3
1 parent e47e52f commit 28bd6a1

File tree

10 files changed

+92
-87
lines changed

10 files changed

+92
-87
lines changed

[refs]

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
---
2-
refs/heads/master: c6d83248301b4aed366b9bef682d200381324c01
2+
refs/heads/master: f6f945fed5c8d1061d80b444331910df29afa392

trunk/src/rt/rust.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {
111111

112112
DLOG(dom, dom, "Using %d worker threads.", num_threads);
113113

114-
int ret = dom->start_main_loops(num_threads);
114+
int ret = kernel->start_task_threads(num_threads);
115115
delete args;
116116
delete kernel;
117117
delete srv;

trunk/src/rt/rust_builtin.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -391,16 +391,16 @@ task_yield(rust_task *task) {
391391

392392
extern "C" CDECL void
393393
task_join(rust_task *task, rust_task *join_task) {
394-
task->dom->scheduler_lock.lock();
394+
task->kernel->scheduler_lock.lock();
395395
// If the other task is already dying, we don't have to wait for it.
396396
if (join_task->dead() == false) {
397397
join_task->tasks_waiting_to_join.push(task);
398398
task->block(join_task, "joining local task");
399-
task->dom->scheduler_lock.unlock();
399+
task->kernel->scheduler_lock.unlock();
400400
task->yield(2);
401401
}
402402
else {
403-
task->dom->scheduler_lock.unlock();
403+
task->kernel->scheduler_lock.unlock();
404404
}
405405
}
406406

trunk/src/rt/rust_dom.cpp

Lines changed: 9 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -51,9 +51,9 @@ rust_dom::activate(rust_task *task) {
5151

5252
task->ctx.next = &ctx;
5353
DLOG(this, task, "descheduling...");
54-
scheduler_lock.unlock();
54+
kernel->scheduler_lock.unlock();
5555
task->ctx.swap(ctx);
56-
scheduler_lock.lock();
56+
kernel->scheduler_lock.lock();
5757
DLOG(this, task, "task has returned");
5858
}
5959

@@ -167,7 +167,7 @@ rust_dom::number_of_live_tasks() {
167167
*/
168168
void
169169
rust_dom::reap_dead_tasks() {
170-
I(this, scheduler_lock.lock_held_by_current_thread());
170+
I(this, kernel->scheduler_lock.lock_held_by_current_thread());
171171
for (size_t i = 0; i < dead_tasks.length(); ) {
172172
rust_task *task = dead_tasks[i];
173173
// Make sure this task isn't still running somewhere else...
@@ -266,7 +266,7 @@ rust_dom::log_state() {
266266
*/
267267
int
268268
rust_dom::start_main_loop(int id) {
269-
scheduler_lock.lock();
269+
kernel->scheduler_lock.lock();
270270

271271
// Make sure someone is watching, to pull us out of infinite loops.
272272
//
@@ -296,9 +296,9 @@ rust_dom::start_main_loop(int id) {
296296
DLOG(this, task,
297297
"all tasks are blocked, scheduler id %d yielding ...",
298298
id);
299-
scheduler_lock.unlock();
299+
kernel->scheduler_lock.unlock();
300300
sync::sleep(100);
301-
scheduler_lock.lock();
301+
kernel->scheduler_lock.lock();
302302
DLOG(this, task,
303303
"scheduler resuming ...");
304304
continue;
@@ -349,9 +349,9 @@ rust_dom::start_main_loop(int id) {
349349
"scheduler yielding ...",
350350
dead_tasks.length());
351351
log_state();
352-
scheduler_lock.unlock();
352+
kernel->scheduler_lock.unlock();
353353
sync::yield();
354-
scheduler_lock.lock();
354+
kernel->scheduler_lock.lock();
355355
} else {
356356
drain_incoming_message_queue(true);
357357
}
@@ -360,28 +360,7 @@ rust_dom::start_main_loop(int id) {
360360

361361
DLOG(this, dom, "finished main-loop %d (dom.rval = %d)", id, rval);
362362

363-
scheduler_lock.unlock();
364-
return rval;
365-
}
366-
367-
int rust_dom::start_main_loops(int num_threads)
368-
{
369-
dom_worker *worker = NULL;
370-
371-
// -1, because this thread will also be a worker.
372-
for(int i = 0; i < num_threads - 1; ++i) {
373-
worker = new dom_worker(i + 1, this);
374-
worker->start();
375-
threads.push(worker);
376-
}
377-
378-
start_main_loop(0);
379-
380-
while(threads.pop(&worker)) {
381-
worker->join();
382-
delete worker;
383-
}
384-
363+
kernel->scheduler_lock.unlock();
385364
return rval;
386365
}
387366

@@ -392,26 +371,14 @@ rust_dom::get_cache() {
392371

393372
rust_task *
394373
rust_dom::create_task(rust_task *spawner, const char *name) {
395-
//scheduler_lock.lock();
396374
rust_task *task =
397375
new (this) rust_task (this, &newborn_tasks, spawner, name);
398376
DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
399377
task, spawner ? spawner->name : "null", name);
400378
newborn_tasks.append(task);
401-
//scheduler_lock.unlock();
402379
return task;
403380
}
404381

405-
rust_dom::dom_worker::dom_worker(int id, rust_dom *owner)
406-
: id(id), owner(owner)
407-
{
408-
}
409-
410-
void rust_dom::dom_worker::run()
411-
{
412-
owner->start_main_loop(id);
413-
}
414-
415382
//
416383
// Local Variables:
417384
// mode: C++

trunk/src/rt/rust_dom.h

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -97,24 +97,10 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
9797
rust_task *schedule_task();
9898

9999
int start_main_loop(int id);
100-
int start_main_loops(int num_threads);
101100

102101
void log_state();
103102

104103
rust_task *create_task(rust_task *spawner, const char *name);
105-
106-
class dom_worker : public rust_thread {
107-
int id;
108-
rust_dom *owner;
109-
110-
public:
111-
dom_worker(int id, rust_dom *owner);
112-
113-
virtual void run();
114-
};
115-
116-
lock_and_signal scheduler_lock;
117-
array_list<dom_worker *> threads;
118104
};
119105

120106
inline rust_log &

trunk/src/rt/rust_kernel.cpp

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,37 @@ rust_kernel::signal_kernel_lock() {
224224
_kernel_lock.unlock();
225225
}
226226

227+
int rust_kernel::start_task_threads(int num_threads)
228+
{
229+
rust_task_thread *thread = NULL;
230+
231+
// -1, because this thread will also be a task thread.
232+
for(int i = 0; i < num_threads - 1; ++i) {
233+
thread = new rust_task_thread(i + 1, this);
234+
thread->start();
235+
threads.push(thread);
236+
}
237+
238+
dom->start_main_loop(0);
239+
240+
while(threads.pop(&thread)) {
241+
thread->join();
242+
delete thread;
243+
}
244+
245+
return dom->rval;
246+
}
247+
248+
rust_task_thread::rust_task_thread(int id, rust_kernel *owner)
249+
: id(id), owner(owner)
250+
{
251+
}
252+
253+
void rust_task_thread::run()
254+
{
255+
owner->dom->start_main_loop(id);
256+
}
257+
227258
//
228259
// Local Variables:
229260
// mode: C++

trunk/src/rt/rust_kernel.h

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,9 @@ rust_handle :
3434
}
3535
};
3636

37+
class rust_task_thread;
38+
39+
3740
/**
3841
* A global object shared by all thread domains. Most of the data structures
3942
* in this class are synchronized since they are accessed from multiple
@@ -44,8 +47,6 @@ class rust_kernel : public rust_thread {
4447
rust_log _log;
4548
rust_srv *_srv;
4649

47-
rust_dom *dom;
48-
4950
/**
5051
* Task proxy objects are kernel owned handles to Rust objects.
5152
*/
@@ -69,7 +70,11 @@ class rust_kernel : public rust_thread {
6970
rust_dom *create_domain(const char *name);
7071
void destroy_domain();
7172

73+
array_list<rust_task_thread *> threads;
74+
7275
public:
76+
rust_dom *dom;
77+
lock_and_signal scheduler_lock;
7378

7479
/**
7580
* Message queues are kernel objects and are associated with domains.
@@ -105,7 +110,10 @@ class rust_kernel : public rust_thread {
105110
void *malloc(size_t size);
106111
void free(void *mem);
107112

113+
// TODO: this should go away
108114
inline rust_dom *get_domain() const { return dom; }
115+
116+
int start_task_threads(int num_threads);
109117
};
110118

111119
inline void *operator new(size_t size, rust_kernel *kernel) {
@@ -116,4 +124,15 @@ inline void *operator new(size_t size, rust_kernel &kernel) {
116124
return kernel.malloc(size);
117125
}
118126

127+
128+
class rust_task_thread : public rust_thread {
129+
int id;
130+
rust_kernel *owner;
131+
132+
public:
133+
rust_task_thread(int id, rust_kernel *owner);
134+
135+
virtual void run();
136+
};
137+
119138
#endif /* RUST_KERNEL_H */

trunk/src/rt/rust_task.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state,
6161
gc_alloc_chain(0),
6262
dom(dom),
6363
cache(NULL),
64+
kernel(dom->kernel),
6465
name(name),
6566
state(state),
6667
cond(NULL),
@@ -134,7 +135,7 @@ void task_start_wrapper(spawn_args *a)
134135
LOG(task, task, "task exited with value %d", rval);
135136

136137
{
137-
scoped_lock with(task->dom->scheduler_lock);
138+
scoped_lock with(task->kernel->scheduler_lock);
138139

139140
// FIXME: the old exit glue does some magical argument copying
140141
// stuff. This is probably still needed.
@@ -158,9 +159,9 @@ rust_task::start(uintptr_t spawnee_fn,
158159
LOGPTR(dom, "from spawnee", spawnee_fn);
159160

160161
I(dom, stk->data != NULL);
161-
I(dom, !dom->scheduler_lock.lock_held_by_current_thread());
162-
163-
scoped_lock with(dom->scheduler_lock);
162+
I(dom, !kernel->scheduler_lock.lock_held_by_current_thread());
163+
164+
scoped_lock with(kernel->scheduler_lock);
164165

165166
char *sp = (char *)rust_sp;
166167

@@ -412,7 +413,7 @@ rust_task::free(void *p, bool is_gc)
412413

413414
void
414415
rust_task::transition(rust_task_list *src, rust_task_list *dst) {
415-
I(dom, dom->scheduler_lock.lock_held_by_current_thread());
416+
I(dom, kernel->scheduler_lock.lock_held_by_current_thread());
416417
DLOG(dom, task,
417418
"task %s " PTR " state change '%s' -> '%s' while in '%s'",
418419
name, (uintptr_t)this, src->name, dst->name, state->name);

trunk/src/rt/rust_task.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ rust_task : public maybe_proxy<rust_task>,
2222
rust_crate_cache *cache;
2323

2424
// Fields known only to the runtime.
25+
rust_kernel *kernel;
2526
const char *const name;
2627
rust_task_list *state;
2728
rust_cond *cond;

0 commit comments

Comments
 (0)