2 files changed, +5 -29 lines
 refs/heads/master: 61b1875c16de39c166b0f4d54bba19f9c6777d1a
 refs/heads/snap-stage1: e33de59e47c5076a89eadeb38f4934f58a3618a6
 refs/heads/snap-stage3: 4a81779abd786ff22d71434c6d9a5917ea4cdfff
-refs/heads/try: f719ea552bec8eb91a2a02370269eee4404bada7
+refs/heads/try: 6c632c71c6f86752fb626501d5368070e3e37385
 refs/tags/release-0.1: 1f5c5126e96c79d22cb7862f75304136e204f105
@@ -123,37 +123,13 @@ rust_scheduler::reap_dead_tasks(int id) {
     rust_task **dead_tasks_copy = (rust_task**)
         srv->malloc(sizeof(rust_task*) * dead_tasks_len);
     for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks[i];
-        dead_tasks_copy[i] = task;
-    }
-
-    // Now drop the lock and futz with the tasks. This avoids establishing
-    // a sched->lock then task->lock locking order, which would be devestating
-    // to performance.
-    lock.unlock();
-
-    for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks_copy[i];
-        task->lock.lock();
-        DLOG(this, task,
-             "deleting unreferenced dead task %s @0x%" PRIxPTR,
-             task->name, task);
-        task->lock.unlock();
-    }
-
-    // Now grab the lock again and remove the tasks that were truly dead
-    lock.lock();
-
-    for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks_copy[i];
-        if (task) {
-            dead_tasks.remove(task);
-        }
+        dead_tasks_copy[i] = dead_tasks.pop_value();
     }

     // Now unlock again because we have to actually free the dead tasks,
-    // and that may end up wanting to lock the task and sched locks
-    // again (via target->send)
+    // and that may end up wanting to lock the kernel lock. We have
+    // a kernel lock -> scheduler lock locking order that we need
+    // to maintain.
     lock.unlock();

     for (size_t i = 0; i < dead_tasks_len; ++i) {
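The new comment pins down a kernel lock -> scheduler lock ordering: the dead-task list is drained while holding only the scheduler lock, and that lock is released before the tasks are actually freed, so the freeing path can take the kernel lock without ever inverting the order. Below is a minimal standalone C++ sketch of that pattern; the Task type, the kernel_lock/sched_lock names, and the reap_dead_tasks signature are illustrative stand-ins, not the runtime's actual API.

    #include <cstdio>
    #include <mutex>
    #include <vector>

    struct Task { const char *name; };

    // Hypothetical stand-ins for the runtime's locks; names are illustrative only.
    std::mutex kernel_lock;          // the "wider" lock, taken first in the ordering
    std::mutex sched_lock;           // the scheduler lock, taken second
    std::vector<Task*> dead_tasks;   // guarded by sched_lock

    void reap_dead_tasks() {
        std::vector<Task*> copy;
        {
            // Drain the shared list while holding only the scheduler lock,
            // analogous to the pop_value() loop in the patch above.
            std::lock_guard<std::mutex> guard(sched_lock);
            copy.swap(dead_tasks);
        }
        // The scheduler lock is released before freeing, so work that needs the
        // kernel lock cannot invert the kernel -> scheduler lock ordering.
        for (Task *t : copy) {
            std::lock_guard<std::mutex> guard(kernel_lock);
            std::printf("freeing dead task %s\n", t->name);
            delete t;
        }
    }

    int main() {
        dead_tasks.push_back(new Task{"example"});
        reap_dead_tasks();
        return 0;
    }

The key design point, under these assumptions, is that no thread ever holds the scheduler lock while requesting the kernel lock; acquiring them in a single fixed order is what rules out deadlock.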