@@ -122,33 +122,26 @@ rust_task_thread::number_of_live_tasks() {
 void
 rust_task_thread::reap_dead_tasks() {
     I(this, lock.lock_held_by_current_thread());
+
     if (dead_tasks.length() == 0) {
         return;
     }
 
+    A(this, dead_tasks.length() == 1,
+      "Only one task should die during a single turn of the event loop");
+
     // First make a copy of the dead_task list with the lock held
-    size_t dead_tasks_len = dead_tasks.length();
-    rust_task **dead_tasks_copy = (rust_task**)
-        srv->malloc(sizeof(rust_task*) * dead_tasks_len);
-    for (size_t i = 0; i < dead_tasks_len; ++i) {
-        dead_tasks_copy[i] = dead_tasks.pop_value();
-    }
+    rust_task *dead_task = dead_tasks.pop_value();
 
-    // Now unlock again because we have to actually free the dead tasks,
-    // and that may end up wanting to lock the kernel lock. We have
-    // a kernel lock -> scheduler lock locking order that we need
-    // to maintain.
+    // Dereferencing the task will probably cause it to be released
+    // from the scheduler, which may end up trying to take this lock
     lock.unlock();
 
-    for (size_t i = 0; i < dead_tasks_len; ++i) {
-        rust_task *task = dead_tasks_copy[i];
-        // Release the task from the kernel so nobody else can get at it
-        kernel->release_task_id(task->id);
-        task->delete_all_stacks();
-        // Deref the task, which may cause it to request us to release it
-        task->deref();
-    }
-    srv->free(dead_tasks_copy);
+    // Release the task from the kernel so nobody else can get at it
+    kernel->release_task_id(dead_task->id);
+    dead_task->delete_all_stacks();
+    // Deref the task, which may cause it to request us to release it
+    dead_task->deref();
 
     lock.lock();
 }
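
The key hazard this change keeps working around is the same one the old comments described: the final deref of a dead task can re-enter the scheduler and try to take the (non-recursive) scheduler lock, so the lock must be dropped before the deref. Below is a minimal self-contained sketch of that unlock-before-deref pattern, assuming nothing about the real runtime: sched, task, reap_one, and release are hypothetical stand-ins, not the Rust runtime's actual types, and unlike the real reap_dead_tasks() this version acquires and releases the lock itself so it can run standalone.

    #include <cassert>
    #include <mutex>
    #include <vector>

    struct task;

    struct sched {
        std::mutex lock;               // non-recursive, like the scheduler lock
        std::vector<task*> dead_tasks;
        void release(task *t);         // takes `lock`; reached via the final deref
    };

    struct task {
        sched *owner;
        int refcount = 1;
        explicit task(sched *s) : owner(s) {}
        void deref() {
            if (--refcount == 0)
                owner->release(this);  // re-enters the scheduler
        }
    };

    void sched::release(task *t) {
        // Would self-deadlock if the caller still held `lock`.
        std::lock_guard<std::mutex> guard(lock);
        delete t;
    }

    // One turn of the reaper: pop a single dead task under the lock,
    // then drop the lock before the deref that may re-take it.
    void reap_one(sched &s) {
        s.lock.lock();
        if (s.dead_tasks.empty()) {
            s.lock.unlock();
            return;
        }
        assert(s.dead_tasks.size() == 1);  // the A(...) invariant above
        task *dead = s.dead_tasks.back();
        s.dead_tasks.pop_back();
        s.lock.unlock();   // deref() may call release(), which locks again
        dead->deref();
        s.lock.lock();     // the original re-locks before returning
        s.lock.unlock();
    }

    int main() {
        sched s;
        s.dead_tasks.push_back(new task(&s));
        reap_one(s);
    }

With at most one dead task per turn of the event loop, the heap-allocated copy of the dead-task list becomes unnecessary, which is why the patch can replace the malloc/copy/free dance with a single pop_value().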