Commit b3f77bf

rt: Change the way the kernel exits to avoid pthread leaks
This makes the kernel join every scheduler thread before exiting in order to ensure that all threads are completely terminated before the process exits. On my machine, for 32-bit targets, this was causing regular valgrind errors.
1 parent e4c0274 commit b3f77bf
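
The errors come from pthread bookkeeping (thread stacks and descriptors) that has not yet been released by the time the process exits; joining each thread guarantees it has fully terminated and its resources have been reclaimed first. Below is a minimal sketch of that join-before-exit pattern in plain C++, using std::thread rather than the runtime's own thread class (the worker count and lambda body are illustrative only):

```cpp
#include <thread>
#include <vector>

int main() {
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i) {
        // Stand-in for a scheduler's task threads.
        workers.emplace_back([] { /* scheduler loop would run here */ });
    }

    // Join every thread before main() returns so each pthread's stack and
    // bookkeeping are reclaimed; threads still winding down at process exit
    // are what valgrind reports as un-freed memory.
    for (std::thread &w : workers) {
        w.join();
    }
    return 0;
}
```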

5 files changed: 44 additions & 28 deletions

src/rt/rust_kernel.cpp

Lines changed: 27 additions & 17 deletions
@@ -20,7 +20,6 @@ rust_kernel::rust_kernel(rust_srv *srv) :
     live_tasks(0),
     max_task_id(0),
     rval(0),
-    live_schedulers(0),
     max_sched_id(0),
     env(srv->env)
 {
@@ -75,7 +74,6 @@ rust_kernel::create_scheduler(size_t num_threads) {
         bool is_new = sched_table
             .insert(std::pair<rust_sched_id, rust_scheduler*>(id, sched)).second;
         A(this, is_new, "Reusing a sched id?");
-        live_schedulers++;
     }
     sched->start_task_threads();
     return id;
@@ -97,26 +95,38 @@ void
 rust_kernel::release_scheduler_id(rust_sched_id id) {
     I(this, !sched_lock.lock_held_by_current_thread());
     scoped_lock with(sched_lock);
-    sched_map::iterator iter = sched_table.find(id);
-    I(this, iter != sched_table.end());
-    rust_scheduler *sched = iter->second;
-    sched_table.erase(iter);
-    delete sched;
-    live_schedulers--;
-    if (live_schedulers == 0) {
-        // We're all done. Tell the main thread to continue
-        sched_lock.signal();
-    }
-}
-
+    // This list will most likely only ever have a single element in it, but
+    // it's an actual list because we could potentially get here multiple
+    // times before the main thread ever calls wait_for_schedulers()
+    join_list.push_back(id);
+    sched_lock.signal();
+}
+
+/*
+Called on the main thread to wait for the kernel to exit. This function is
+also used to join on every terminating scheduler thread, so that we can be
+sure they have completely exited before the process exits. If we don't join
+them then we can see valgrind errors due to un-freed pthread memory.
+*/
 int
 rust_kernel::wait_for_schedulers()
 {
     I(this, !sched_lock.lock_held_by_current_thread());
     scoped_lock with(sched_lock);
-    // Schedulers could possibly have already exited
-    if (live_schedulers != 0) {
-        sched_lock.wait();
+    while (!sched_table.empty()) {
+        while (!join_list.empty()) {
+            rust_sched_id id = join_list.back();
+            join_list.pop_back();
+            sched_map::iterator iter = sched_table.find(id);
+            I(this, iter != sched_table.end());
+            rust_scheduler *sched = iter->second;
+            sched_table.erase(iter);
+            sched->join_task_threads();
+            delete sched;
+        }
+        if (!sched_table.empty()) {
+            sched_lock.wait();
+        }
     }
     return rval;
 }
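
The new wait_for_schedulers() is a small producer/consumer handshake on sched_lock: an exiting scheduler pushes its id onto join_list and signals, and the main thread drains the list, joins and deletes each scheduler, then sleeps again until sched_table is empty. A rough standalone approximation of that shape using std::mutex and std::condition_variable (the names and the Scheduler stand-in are illustrative, not the runtime's types):

```cpp
#include <condition_variable>
#include <map>
#include <mutex>
#include <vector>

struct Scheduler { void join_task_threads() { /* join this scheduler's threads */ } };

std::mutex sched_lock;
std::condition_variable sched_cv;
std::map<int, Scheduler*> sched_table;  // schedulers still alive
std::vector<int> join_list;             // ids whose work is finished

// Called from a scheduler thread: queue the id and wake the main thread.
void release_scheduler_id(int id) {
    std::lock_guard<std::mutex> with(sched_lock);
    join_list.push_back(id);
    sched_cv.notify_one();
}

// Called on the main thread: join and free every scheduler before returning.
void wait_for_schedulers() {
    std::unique_lock<std::mutex> with(sched_lock);
    while (!sched_table.empty()) {
        while (!join_list.empty()) {
            int id = join_list.back();
            join_list.pop_back();
            Scheduler *sched = sched_table.at(id);
            sched_table.erase(id);
            sched->join_task_threads();
            delete sched;
        }
        if (!sched_table.empty()) {
            sched_cv.wait(with);  // woken again by release_scheduler_id()
        }
    }
}
```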

src/rt/rust_kernel.h

Lines changed: 7 additions & 5 deletions
@@ -3,6 +3,7 @@
 #define RUST_KERNEL_H

 #include <map>
+#include <vector>
 #include "memory_region.h"
 #include "rust_log.h"

@@ -36,14 +37,15 @@ class rust_kernel {
     lock_and_signal rval_lock;
     int rval;

-    // Protects live_schedulers, max_sched_id and sched_table
+    // Protects max_sched_id, sched_table and join_list
     lock_and_signal sched_lock;
-    // Tracks the number of schedulers currently running.
-    // When this hits 0 we will signal the sched_lock and the
-    // kernel will terminate.
-    uintptr_t live_schedulers;
+    // The next scheduler id
     rust_sched_id max_sched_id;
+    // A map from scheduler ids to schedulers. When this is empty
+    // the kernel terminates
     sched_map sched_table;
+    // A list of scheduler ids that are ready to exit
+    std::vector<rust_sched_id> join_list;

 public:

src/rt/rust_scheduler.cpp

Lines changed: 9 additions & 5 deletions
@@ -59,17 +59,21 @@ rust_scheduler::destroy_task_threads() {
 void
 rust_scheduler::start_task_threads()
 {
-    // Copy num_threads because it's possible for the last thread
-    // to terminate and have the kernel delete us before we
-    // hit the last check against num_threads, in which case
-    // we would be accessing invalid memory.
-    uintptr_t num_threads = this->num_threads;
     for(size_t i = 0; i < num_threads; ++i) {
         rust_task_thread *thread = threads[i];
         thread->start();
     }
 }

+void
+rust_scheduler::join_task_threads()
+{
+    for(size_t i = 0; i < num_threads; ++i) {
+        rust_task_thread *thread = threads[i];
+        thread->join();
+    }
+}
+
 void
 rust_scheduler::kill_all_tasks() {
     for(size_t i = 0; i < num_threads; ++i) {
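
The defensive copy of num_threads removed from start_task_threads() was guarding against the kernel deleting the scheduler from another thread mid-loop; with this change that presumably can no longer happen, since schedulers are now deleted only on the main thread, inside wait_for_schedulers() and after their task threads have been joined.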

src/rt/rust_scheduler.h

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@ class rust_scheduler : public kernel_owned<rust_scheduler> {
     ~rust_scheduler();

     void start_task_threads();
+    void join_task_threads();
     void kill_all_tasks();
     rust_task_id create_task(rust_task *spawner,
                              const char *name,

src/rt/rust_task_thread.cpp

Lines changed: 0 additions & 1 deletion
@@ -320,7 +320,6 @@ rust_task_thread::create_task(rust_task *spawner, const char *name,

 void rust_task_thread::run() {
     this->start_main_loop();
-    detach();
     sched->release_task_thread();
 }
