| 1 | +/** |
| 2 | + The rust task is a cooperatively-scheduled green thread that executes |
| 3 | + Rust code on a segmented stack. |
| 4 | +
| 5 | + This class has too many responsibilities: |
| 6 | +
| 7 | + * Working with the scheduler loop to signal and respond to state changes, |
| 8 | + and dealing with all the thread synchronization issues involved |
| 9 | +
| 10 | + * Managing the dynamically resizing list of Rust stack segments |
| 11 | +
| 12 | + * Switching between running Rust code on the Rust segmented stack and |
| 13 | + native C code on large stacks owned by the scheduler |
| 14 | +
| 15 | + The lifetime of a rust_task object closely mirrors that of a running Rust |
| 16 | + task object, but they are not identical. In particular, the rust_task is an |
| 17 | + atomically reference counted object that might be accessed from arbitrary |
| 18 | + threads at any time. This may keep the task from being destroyed even after |
| 19 | + the task is dead from a Rust task lifecycle perspective. |
| 20 | +
| 21 | + FIXME: The task and the scheduler have an over-complicated, undocumented |
| 22 | + protocol for shutting down the task, hopefully without races. It would be |
| 23 | + easier to reason about if other runtime objects could not access the task |
| 24 | + from arbitrary threads, and didn't need to be atomically refcounted. |
| 25 | + */ |
1 | 26 |
2 | 27 | #ifndef RUST_TASK_H
3 | 28 | #define RUST_TASK_H
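As a rough illustration of the refcounting behaviour described in the new comment above (an atomically refcounted object that can outlive the Rust-level death of the task), here is a minimal, self-contained sketch; the names task_handle, ref, deref and dead are hypothetical and not the actual rust_task interface.

```cpp
#include <atomic>
#include <cstdio>

// Hypothetical intrusive refcount, only to show why the object can be
// destroyed long after the task is "dead" in the Rust lifecycle sense.
struct task_handle {
    std::atomic<int> ref_count{1};   // the scheduler holds the first reference
    bool dead = false;               // Rust-level lifecycle state

    void ref() { ref_count.fetch_add(1, std::memory_order_relaxed); }

    void deref() {
        // Destruction happens only when the last reference goes away, and
        // that reference may be held by an arbitrary thread.
        if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            delete this;
        }
    }
};

int main() {
    task_handle *t = new task_handle();
    t->ref();           // some other runtime object takes a reference
    t->dead = true;     // the Rust task finishes executing
    t->deref();         // the scheduler drops its reference; object survives
    printf("task is dead but not yet destroyed\n");
    t->deref();         // last reference dropped; only now is it deleted
}
```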
17 | 42 |
18 | 43 | // The amount of extra space at the end of each stack segment, available
19 | 44 | // to the rt, compiler and dynamic linker for running small functions
20 | | -// FIXME: We want this to be 128 but need to slim the red zone calls down |
| 45 | +// FIXME: We want this to be 128 but need to slim the red zone calls down, |
| 46 | +// disable lazy symbol relocation, and other things we haven't discovered yet |
21 | 47 | #define RZ_LINUX_32 (1024*2)
22 | 48 | #define RZ_LINUX_64 (1024*2)
23 | 49 | #define RZ_MAC_32 (1024*20)
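To make the red zone comment above concrete, here is a small sketch of segment allocation under an assumed layout; stk_seg_sketch, alloc_segment and the flat malloc layout are hypothetical and not the runtime's real stk_seg.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Hypothetical red-zone size; the runtime picks one of the RZ_* values
// above depending on the platform.
static const size_t RED_ZONE = 1024 * 2;

// Simplified stand-in for a stack segment descriptor.
struct stk_seg_sketch {
    uint8_t *base;    // lowest address of the allocation
    uint8_t *limit;   // lowest address Rust frames are allowed to reach
    uint8_t *top;     // highest address; stacks grow downward from here
};

// Allocate `size` usable bytes plus the red zone. Everything between
// `base` and `limit` is the red zone: Rust frames must stop at `limit`,
// but the rt, compiler-inserted calls and the dynamic linker may dip
// below it to run small functions without a stack expansion.
static stk_seg_sketch alloc_segment(size_t size) {
    stk_seg_sketch seg;
    seg.base = static_cast<uint8_t *>(malloc(size + RED_ZONE));
    seg.limit = seg.base + RED_ZONE;
    seg.top = seg.base + RED_ZONE + size;
    return seg;
}

int main() {
    stk_seg_sketch seg = alloc_segment(4096);
    free(seg.base);
}
```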
59 | 85 | #endif
60 | 86 | #endif
61 | 87 |
62 | | -extern "C" CDECL void |
63 | | -record_sp_limit(void *limit); |
64 | | -extern "C" CDECL uintptr_t |
65 | | -get_sp_limit(); |
66 | | - |
67 | | -// The function prolog compares the amount of stack needed to the end of |
68 | | -// the stack. As an optimization, when the frame size is less than 256 |
69 | | -// bytes, it will simply compare %esp to to the stack limit instead of |
70 | | -// subtracting the frame size. As a result we need our stack limit to |
71 | | -// account for those 256 bytes. |
72 | | -const unsigned LIMIT_OFFSET = 256; |
73 | | - |
74 | 88 | struct rust_box;
75 | 89 |
76 | 90 | struct frame_glue_fns {
@@ -323,14 +337,19 @@ template <typename T> struct task_owned {
323 | 337 |
324 | 338 | // This stuff is on the stack-switching fast path
325 | 339 |
326 | | -// Get a rough approximation of the current stack pointer |
327 | | -extern "C" uintptr_t get_sp(); |
328 | | - |
329 | | -// This is the function that switches stacks by calling another function with |
330 | | -// a single void* argument while changing the stack pointer. It has a funny |
331 | | -// name because gdb doesn't normally like to backtrace through split stacks |
332 | | -// (thinks it indicates a bug), but has a special case to allow functions |
333 | | -// named __morestack to move the stack pointer around. |
| 340 | +// Records the pointer to the end of the Rust stack in a platform- |
| 341 | +// specific location in the thread control block |
| 342 | +extern "C" CDECL void record_sp_limit(void *limit); |
| 343 | +extern "C" CDECL uintptr_t get_sp_limit(); |
| 344 | +// Gets a pointer to the vicinity of the current stack pointer |
| 345 | +extern "C" uintptr_t get_sp(); |
| 346 | + |
| 347 | +// This is the function that switches between the C and the Rust stack by |
| 348 | +// calling another function with a single void* argument while changing the |
| 349 | +// stack pointer. It has a funny name because gdb doesn't normally like to |
| 350 | +// backtrace through split stacks (thinks it indicates a bug), but has a |
| 351 | +// special case to allow functions named __morestack to move the stack pointer |
| 352 | +// around. |
334 | 353 | extern "C" void __morestack(void *args, void *fn_ptr, uintptr_t stack_ptr);
335 | 354 |
336 | 355 | inline static uintptr_t
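A sketch of the calling pattern the __morestack comment above describes. fake_morestack below is a hypothetical stand-in that simply forwards the call on the current stack; the real __morestack is runtime assembly that installs stack_ptr as the stack pointer before making the call and restores it afterwards.

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for the assembly __morestack: it takes the argument block, the
// function to run, and the top of the stack to run it on. This sketch makes
// the call on the current stack so only the shape of the interface is shown.
static void fake_morestack(void *args, void (*fn_ptr)(void *), uintptr_t stack_ptr) {
    (void)stack_ptr;   // a real implementation would switch to this address
    fn_ptr(args);
}

struct call_args { int x; };

static void on_other_stack(void *a) {
    printf("running with x = %d\n", static_cast<call_args *>(a)->x);
}

int main() {
    call_args a = { 42 };
    uint8_t pretend_stack[4096];
    // Stacks grow down, so the value passed to the switcher is the end of
    // the buffer (suitably aligned in a real implementation).
    fake_morestack(&a, on_other_stack,
                   reinterpret_cast<uintptr_t>(pretend_stack + sizeof(pretend_stack)));
}
```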
@@ -490,6 +509,14 @@ rust_task::prev_stack() {
490 | 509 | extern "C" CDECL void
491 | 510 | record_sp_limit(void *limit);
492 | 511 |
| 512 | +// The LLVM-generated segmented-stack function prolog compares the amount of |
| 513 | +// stack needed for each frame to the end-of-stack pointer stored in the |
| 514 | +// TCB. As an optimization, when the frame size is less than 256 bytes, it |
| 515 | +// will simply compare %esp to the stack limit instead of subtracting the |
| 516 | +// frame size. As a result we need our stack limit to account for those 256 |
| 517 | +// bytes. |
| 518 | +const unsigned LIMIT_OFFSET = 256; |
| 519 | + |
493 | 520 | inline void
494 | 521 | rust_task::record_stack_limit() {
495 | 522 | assert(stk);
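As a sketch of how LIMIT_OFFSET and the red zone could combine into the limit that record_stack_limit stores in the TCB; the names and the exact arithmetic here are assumptions, not the function's actual body.

```cpp
#include <cstdint>
#include <cstdio>

// Values mirrored from the header for the sketch; RED_ZONE stands in for
// whichever RZ_* constant applies on the current platform.
static const unsigned LIMIT_OFFSET_SKETCH = 256;
static const uintptr_t RED_ZONE = 1024 * 2;

// Hypothetical: compute the limit value written into the thread control
// block for a segment whose lowest address is `base`. Raising the limit by
// LIMIT_OFFSET means a frame that passes the bare "%esp >= limit" check can
// still use up to 256 bytes below %esp without running off the segment.
static uintptr_t recorded_limit(uintptr_t base) {
    return base + RED_ZONE + LIMIT_OFFSET_SKETCH;
}

// Sketch of the two prolog checks the comment above describes.
static bool small_frame_ok(uintptr_t sp, uintptr_t limit) {
    return sp >= limit;            // frame size < 256: compare %esp only
}
static bool large_frame_ok(uintptr_t sp, uintptr_t frame, uintptr_t limit) {
    return sp - frame >= limit;    // otherwise subtract the frame size first
}

int main() {
    uintptr_t base = 0x100000;     // pretend segment start
    uintptr_t limit = recorded_limit(base);
    printf("limit = %#lx\n", (unsigned long)limit);
    printf("small frame at limit ok: %d\n", small_frame_ok(limit, limit));
    printf("4k frame at limit ok: %d\n", large_frame_ok(limit, 4096, limit));
}
```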