Skip to content

Commit 97a231e

Browse files
committed
---
yaml --- r: 13998 b: refs/heads/try c: 6c632c7 h: refs/heads/master v: v3
1 parent eb618d7 commit 97a231e

File tree

2 files changed

+5
-29
lines changed

2 files changed

+5
-29
lines changed

[refs]

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22
refs/heads/master: 61b1875c16de39c166b0f4d54bba19f9c6777d1a
33
refs/heads/snap-stage1: e33de59e47c5076a89eadeb38f4934f58a3618a6
44
refs/heads/snap-stage3: 4a81779abd786ff22d71434c6d9a5917ea4cdfff
5-
refs/heads/try: f719ea552bec8eb91a2a02370269eee4404bada7
5+
refs/heads/try: 6c632c71c6f86752fb626501d5368070e3e37385
66
refs/tags/release-0.1: 1f5c5126e96c79d22cb7862f75304136e204f105

branches/try/src/rt/rust_scheduler.cpp

Lines changed: 4 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -123,37 +123,13 @@ rust_scheduler::reap_dead_tasks(int id) {
123123
rust_task **dead_tasks_copy = (rust_task**)
124124
srv->malloc(sizeof(rust_task*) * dead_tasks_len);
125125
for (size_t i = 0; i < dead_tasks_len; ++i) {
126-
rust_task *task = dead_tasks[i];
127-
dead_tasks_copy[i] = task;
128-
}
129-
130-
// Now drop the lock and futz with the tasks. This avoids establishing
131-
// a sched->lock then task->lock locking order, which would be devastating
132-
// to performance.
133-
lock.unlock();
134-
135-
for (size_t i = 0; i < dead_tasks_len; ++i) {
136-
rust_task *task = dead_tasks_copy[i];
137-
task->lock.lock();
138-
DLOG(this, task,
139-
"deleting unreferenced dead task %s @0x%" PRIxPTR,
140-
task->name, task);
141-
task->lock.unlock();
142-
}
143-
144-
// Now grab the lock again and remove the tasks that were truly dead
145-
lock.lock();
146-
147-
for (size_t i = 0; i < dead_tasks_len; ++i) {
148-
rust_task *task = dead_tasks_copy[i];
149-
if (task) {
150-
dead_tasks.remove(task);
151-
}
126+
dead_tasks_copy[i] = dead_tasks.pop_value();
152127
}
153128

154129
// Now unlock again because we have to actually free the dead tasks,
155-
// and that may end up wanting to lock the task and sched locks
156-
// again (via target->send)
130+
// and that may end up wanting to lock the kernel lock. We have
131+
// a kernel lock -> scheduler lock locking order that we need
132+
// to maintain.
157133
lock.unlock();
158134

159135
for (size_t i = 0; i < dead_tasks_len; ++i) {

0 commit comments

Comments
 (0)