
Commit 59cc1a4

---
yaml --- r: 10992 b: refs/heads/master c: 6c632c7 h: refs/heads/master v: v3
1 parent 8d72490 commit 59cc1a4

2 files changed: 5 additions & 29 deletions

[refs]

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 ---
-refs/heads/master: f719ea552bec8eb91a2a02370269eee4404bada7
+refs/heads/master: 6c632c71c6f86752fb626501d5368070e3e37385
 refs/heads/snap-stage1: e33de59e47c5076a89eadeb38f4934f58a3618a6
 refs/heads/snap-stage3: 4a81779abd786ff22d71434c6d9a5917ea4cdfff
 refs/heads/try: 2898dcc5d97da9427ac367542382b6239d9c0bbf

trunk/src/rt/rust_scheduler.cpp

Lines changed: 4 additions & 28 deletions
@@ -123,37 +123,13 @@ rust_scheduler::reap_dead_tasks(int id) {
     rust_task **dead_tasks_copy = (rust_task**)
         srv->malloc(sizeof(rust_task*) * dead_tasks_len);
     for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks[i];
-        dead_tasks_copy[i] = task;
-    }
-
-    // Now drop the lock and futz with the tasks. This avoids establishing
-    // a sched->lock then task->lock locking order, which would be devestating
-    // to performance.
-    lock.unlock();
-
-    for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks_copy[i];
-        task->lock.lock();
-        DLOG(this, task,
-             "deleting unreferenced dead task %s @0x%" PRIxPTR,
-             task->name, task);
-        task->lock.unlock();
-    }
-
-    // Now grab the lock again and remove the tasks that were truly dead
-    lock.lock();
-
-    for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks_copy[i];
-        if (task) {
-            dead_tasks.remove(task);
-        }
+        dead_tasks_copy[i] = dead_tasks.pop_value();
     }
 
     // Now unlock again because we have to actually free the dead tasks,
-    // and that may end up wanting to lock the task and sched locks
-    // again (via target->send)
+    // and that may end up wanting to lock the kernel lock. We have
+    // a kernel lock -> scheduler lock locking order that we need
+    // to maintain.
     lock.unlock();
 
     for (size_t i = 0; i < dead_tasks_len; ++i) {
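The new comment pins down the locking discipline this change preserves: locks are always taken in the order kernel lock first, then scheduler lock, and the scheduler lock is dropped before any work that might need the kernel lock. Below is a minimal standalone C++ sketch of that ordering rule; the kernel and scheduler types, their std::mutex members, and release_task are hypothetical stand-ins for illustration, not the runtime's actual classes or APIs.

#include <mutex>

// Hypothetical stand-ins for the runtime's kernel and scheduler objects,
// used only to illustrate the "kernel lock -> scheduler lock" ordering
// the commit comment describes.
struct kernel    { std::mutex lock; };
struct scheduler { std::mutex lock; };

// Correct order: any path that needs both locks takes the kernel lock
// first, then the scheduler lock, so no two threads can each hold one
// lock while waiting for the other.
void release_task(kernel &k, scheduler &s) {
    std::lock_guard<std::mutex> klock(k.lock);   // kernel lock first
    std::lock_guard<std::mutex> slock(s.lock);   // scheduler lock second
    // ... free the dead task and update bookkeeping here ...
}

This is why reap_dead_tasks copies the dead tasks out and then unlocks before freeing them: freeing a task may end up wanting the kernel lock, and acquiring it while still holding the scheduler lock would invert the order above and risk deadlock.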
