
Commit 94ac30c

rt: Improve docs for main, kernel, scheduler, and task
1 parent 9b9ceea commit 94ac30c

4 files changed: +120 -37 lines


src/rt/rust.cpp

Lines changed: 30 additions & 7 deletions
@@ -1,9 +1,14 @@
+/**
+ * Main entry point into the Rust runtime. Here we initialize the kernel,
+ * create the initial scheduler and run the main task.
+ */

 #include "rust_globals.h"
 #include "rust_kernel.h"
 #include "rust_util.h"
 #include "rust_scheduler.h"

+// Creates a rust argument vector from the platform argument vector
 struct
 command_line_args : public kernel_owned<command_line_args>
 {
@@ -61,42 +66,60 @@ command_line_args : public kernel_owned<command_line_args>
     }
 };

-/**
- * Main entry point into the Rust runtime. Here we create a Rust service,
- * initialize the kernel, create the root domain and run it.
- */
-
+// A global that indicates whether Rust typestate claim statements should be
+// executed. Generated code will read this variable directly (I think).
+// FIXME: This belongs somewhere else
 int check_claims = 0;

+/**
+   The runtime entrypoint. The (C ABI) main function generated by rustc calls
+   `rust_start`, providing the address of the Rust ABI main function, the
+   platform argument vector, and a `crate_map` that provides some logging
+   metadata.
+ */
 extern "C" CDECL int
 rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {

+    // Load runtime configuration options from the environment.
+    // FIXME #1497: Should provide a way to get these from the command
+    // line as well.
     rust_env *env = load_env();

     update_log_settings(crate_map, env->logspec);
+
+    // Maybe turn on typestate claim checking
     check_claims = env->check_claims;

     rust_kernel *kernel = new rust_kernel(env);
+
+    // Create the main scheduler and the main task
     rust_sched_id sched_id = kernel->create_scheduler(env->num_sched_threads);
     rust_scheduler *sched = kernel->get_scheduler_by_id(sched_id);
     rust_task *root_task = sched->create_task(NULL, "main");
+
+    // Build the command line arguments to pass to the root task
     command_line_args *args
         = new (kernel, "main command line args")
         command_line_args(root_task, argc, argv);

     LOG(root_task, dom, "startup: %d args in 0x%" PRIxPTR,
-        args->argc, (uintptr_t)args->args);
+        args->argc, (uintptr_t)args->args);
     for (int i = 0; i < args->argc; i++) {
         LOG(root_task, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
     }

+    // Schedule the main Rust task
     root_task->start((spawn_fn)main_fn, NULL, args->args);
+
+    // At this point the task lifecycle is responsible for it
+    // and our pointer may not be valid
     root_task = NULL;

+    // Run the kernel until all schedulers exit
     int ret = kernel->run();
+
     delete args;
     delete kernel;
-
     free_env(env);

     return ret;
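The new doc comment describes rust_start as the handoff point from the rustc-generated C-ABI main. As a rough illustration (not part of this commit), a host-side caller might look like the sketch below; only the rust_start signature comes from the code above, while rust_main_stub and hypothetical_crate_map are invented stand-ins for symbols the compiler would normally emit.

    #include <stdint.h>

    extern "C" int rust_start(uintptr_t main_fn, int argc, char **argv,
                              void *crate_map);

    extern "C" void rust_main_stub();        // stand-in for the Rust ABI main
    extern "C" void *hypothetical_crate_map; // stand-in for the crate map symbol

    int main(int argc, char **argv) {
        // Hand the Rust main function, the platform argv, and the crate map
        // to the runtime; rust_start only returns after kernel->run() ends.
        return rust_start((uintptr_t)rust_main_stub, argc, argv,
                          hypothetical_crate_map);
    }

The value rust_start returns is whatever kernel->run() produced, which then becomes the process exit status.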

src/rt/rust_kernel.h

Lines changed: 35 additions & 9 deletions
@@ -1,4 +1,34 @@
 // -*- c++ -*-
+
+/**
+   A single runtime instance.
+
+   The kernel is primarily responsible for managing the lifetime of
+   schedulers, which in turn run rust tasks. It provides a memory
+   allocator and logging service for use by other runtime components,
+   creates unique task and port ids, and provides global access
+   to ports by id.
+
+   The kernel runs until there are no live schedulers.
+
+   The kernel internally runs an additional, special scheduler called
+   the 'osmain' (or platform) scheduler, which schedules tasks on the
+   thread that is running the kernel (normally the thread on which the
+   C main function was called). This scheduler may be used by Rust
+   code for interacting with platform APIs that insist on being called
+   from the main thread.
+
+   The requirements of the osmain scheduler have resulted in a complex
+   process for creating and running scheduler loops that involves
+   a thing called a 'rust_sched_launcher_factory' whose function I've
+   already forgotten. rust_scheduler is the main scheduler class,
+   and tasks are scheduled on individual threads by rust_sched_loop.
+
+   Ideally all the in-memory Rust state is encapsulated by a kernel
+   instance, but there is still some truly global data in the runtime
+   (like the check claims flag).
+ */
+
 #ifndef RUST_KERNEL_H
 #define RUST_KERNEL_H

@@ -12,24 +42,20 @@
 #include "rust_sched_reaper.h"
 #include "util/hash_map.h"

-struct rust_task_thread;
 class rust_scheduler;
+class rust_sched_driver;
+class rust_sched_launcher_factory;
+struct rust_task_thread;
 class rust_port;

+// Scheduler, task, and port handles. These uniquely identify the objects
+// they represent within a single kernel instance.
 typedef intptr_t rust_sched_id;
 typedef intptr_t rust_task_id;
 typedef intptr_t rust_port_id;

 typedef std::map<rust_sched_id, rust_scheduler*> sched_map;

-class rust_sched_driver;
-class rust_sched_launcher_factory;
-
-/**
- * A global object shared by all thread domains. Most of the data structures
- * in this class are synchronized since they are accessed from multiple
- * threads.
- */
 class rust_kernel {
     memory_region _region;
     rust_log _log;
src/rt/rust_scheduler.h

Lines changed: 7 additions & 0 deletions
@@ -1,3 +1,10 @@
+/**
+   The rust scheduler. Schedulers may be added to the kernel
+   dynamically and they run until there are no more tasks to
+   schedule. Most of the scheduler work is carried out in worker
+   threads by rust_sched_loop.
+ */
+
 #ifndef RUST_SCHEDULER_H
 #define RUST_SCHEDULER_H
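A loose sketch of the lifecycle the new comment describes, where a scheduler runs until it has no more tasks to schedule; the names and the plain atomic counter are invented for illustration and are not the runtime's real bookkeeping.

    #include <atomic>

    class sketch_scheduler {
        std::atomic<int> live_tasks;

    public:
        sketch_scheduler() : live_tasks(0) {}

        void task_created()  { live_tasks.fetch_add(1); }
        void task_finished() { live_tasks.fetch_sub(1); }

        // Roughly the role rust_sched_loop plays on each worker thread: keep
        // scheduling until this scheduler has no live tasks left.
        void run_loop() {
            while (live_tasks.load() > 0) {
                // ... pick a runnable task and switch to it ...
            }
            // No tasks remain; the kernel can tear this scheduler down.
        }
    };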

src/rt/rust_task.h

Lines changed: 48 additions & 21 deletions
@@ -1,3 +1,28 @@
+/**
+   The rust task is a cooperatively-scheduled green thread that executes
+   Rust code on a segmented stack.
+
+   This class has too many responsibilities:
+
+   * Working with the scheduler loop to signal and respond to state changes,
+     and dealing with all the thread synchronization issues involved
+
+   * Managing the dynamically resizing list of Rust stack segments
+
+   * Switching between running Rust code on the Rust segmented stack and
+     native C code on large stacks owned by the scheduler
+
+   The lifetime of a rust_task object closely mirrors that of a running Rust
+   task object, but they are not identical. In particular, the rust_task is an
+   atomically reference counted object that might be accessed from arbitrary
+   threads at any time. This may keep the task from being destroyed even after
+   the task is dead from a Rust task lifecycle perspective.
+
+   FIXME: The task and the scheduler have an over-complicated, undocumented
+   protocol for shutting down the task, hopefully without races. It would be
+   easier to reason about if other runtime objects could not access the task
+   from arbitrary threads, and didn't need to be atomically refcounted.
+ */

 #ifndef RUST_TASK_H
 #define RUST_TASK_H
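To make the "atomically reference counted object that might be accessed from arbitrary threads" point concrete, here is a generic refcounting sketch. The type and members are invented, not the runtime's actual ref/deref implementation; it only illustrates why the C++ object can outlive the Rust-level task.

    #include <atomic>

    struct refcounted_task_sketch {
        std::atomic<int> ref_count;
        bool task_dead;              // the Rust-level lifecycle has ended

        refcounted_task_sketch() : ref_count(1), task_dead(false) {}

        void ref() { ref_count.fetch_add(1, std::memory_order_relaxed); }

        void deref() {
            // The C++ object is only destroyed when the last reference goes
            // away, which can be long after task_dead became true.
            if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
                delete this;
        }
    };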
@@ -17,7 +42,8 @@

 // The amount of extra space at the end of each stack segment, available
 // to the rt, compiler and dynamic linker for running small functions
-// FIXME: We want this to be 128 but need to slim the red zone calls down
+// FIXME: We want this to be 128 but need to slim the red zone calls down,
+// disable lazy symbol relocation, and other things we haven't discovered yet
 #define RZ_LINUX_32 (1024*2)
 #define RZ_LINUX_64 (1024*2)
 #define RZ_MAC_32 (1024*20)
@@ -59,18 +85,6 @@
 #endif
 #endif

-extern "C" CDECL void
-record_sp_limit(void *limit);
-extern "C" CDECL uintptr_t
-get_sp_limit();
-
-// The function prolog compares the amount of stack needed to the end of
-// the stack. As an optimization, when the frame size is less than 256
-// bytes, it will simply compare %esp to to the stack limit instead of
-// subtracting the frame size. As a result we need our stack limit to
-// account for those 256 bytes.
-const unsigned LIMIT_OFFSET = 256;
-
 struct rust_box;

 struct frame_glue_fns {
@@ -323,14 +337,19 @@ template <typename T> struct task_owned {

 // This stuff is on the stack-switching fast path

-// Get a rough approximation of the current stack pointer
-extern "C" uintptr_t get_sp();
-
-// This is the function that switches stacks by calling another function with
-// a single void* argument while changing the stack pointer. It has a funny
-// name because gdb doesn't normally like to backtrace through split stacks
-// (thinks it indicates a bug), but has a special case to allow functions
-// named __morestack to move the stack pointer around.
+// Records the pointer to the end of the Rust stack in a platform-
+// specific location in the thread control block
+extern "C" CDECL void record_sp_limit(void *limit);
+extern "C" CDECL uintptr_t get_sp_limit();
+// Gets a pointer to the vicinity of the current stack pointer
+extern "C" uintptr_t get_sp();
+
+// This is the function that switches between the C and the Rust stack by
+// calling another function with a single void* argument while changing the
+// stack pointer. It has a funny name because gdb doesn't normally like to
+// backtrace through split stacks (thinks it indicates a bug), but has a
+// special case to allow functions named __morestack to move the stack pointer
+// around.
 extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);

 inline static uintptr_t
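An assumed usage sketch for the __morestack shim described in the hunk above: it calls a C function with a single void* argument on a different stack and then switches back. The helper and struct names are invented; the real call sites live inside rust_task's C-stack handling.

    #include <stdint.h>

    extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);

    struct big_stack_args { int value; };

    // Function to run on the other stack; it receives the single void*
    // argument that __morestack forwards.
    static void on_big_stack(void *a) {
        big_stack_args *args = static_cast<big_stack_args *>(a);
        (void)args;  // ... work that needs a large, contiguous C stack ...
    }

    // Invented helper: switch to the stack whose top is c_stack_top, run
    // on_big_stack(args) there, then switch back.
    static void run_on_big_stack(big_stack_args *args, uintptr_t c_stack_top) {
        __morestack(args, (void *)on_big_stack, c_stack_top);
    }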
@@ -490,6 +509,14 @@ rust_task::prev_stack() {
 extern "C" CDECL void
 record_sp_limit(void *limit);

+// The LLVM-generated segmented-stack function prolog compares the amount of
+// stack needed for each frame to the end-of-stack pointer stored in the
+// TCB. As an optimization, when the frame size is less than 256 bytes, it
+// will simply compare %esp to the stack limit instead of subtracting the
+// frame size. As a result we need our stack limit to account for those 256
+// bytes.
+const unsigned LIMIT_OFFSET = 256;
+
 inline void
 rust_task::record_stack_limit() {
     assert(stk);
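A sketch of the arithmetic the new comment implies, with stack_start and red_zone standing in for the real stack-segment fields; this is not the actual body of rust_task::record_stack_limit. The limit stored in the TCB leaves LIMIT_OFFSET bytes of slack so that frames under 256 bytes, whose prolog only compares %esp against the limit, still have room.

    #include <stdint.h>
    #include <stddef.h>

    extern "C" void record_sp_limit(void *limit);  // declared above

    const unsigned LIMIT_OFFSET = 256;

    static void record_limit_sketch(uintptr_t stack_start, size_t red_zone) {
        // Everything below this address looks like "out of stack" to the
        // LLVM-generated prolog check.
        uintptr_t limit = stack_start + LIMIT_OFFSET + red_zone;
        record_sp_limit(reinterpret_cast<void *>(limit));
    }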
