// The amount of extra space at the end of each stack segment, available
// to the rt, compiler and dynamic linker for running small functions
// FIXME: We want this to be 128 but need to slim the red zone calls down
- #define RZ_LINUX_32 1024*20
- #define RZ_LINUX_64 1024*20
- #define RZ_MAC_32 1024*20
- #define RZ_MAC_64 1024*20
- #define RZ_WIN_32 1024*20
+ #define RZ_LINUX_32 (1024*20)
+ #define RZ_LINUX_64 (1024*20)
+ #define RZ_MAC_32 (1024*20)
+ #define RZ_MAC_64 (1024*20)
+ #define RZ_WIN_32 (1024*20)

#ifdef __linux__
#ifdef __i386__
@@ -49,6 +49,12 @@
#endif
#endif

+ // A value that goes at the end of the stack and must not be touched
+ const uint8_t stack_canary[] = {0xAB, 0xCD, 0xAB, 0xCD,
+                                 0xAB, 0xCD, 0xAB, 0xCD,
+                                 0xAB, 0xCD, 0xAB, 0xCD,
+                                 0xAB, 0xCD, 0xAB, 0xCD};
+
// Stack size
size_t g_custom_min_stack_size = 0;

@@ -95,7 +101,8 @@ config_valgrind_stack(stk_seg *stk) {
    // old stack segments, since the act of popping the stack previously
    // caused valgrind to consider the whole thing inaccessible.
    size_t sz = stk->end - (uintptr_t)&stk->data[0];
-     VALGRIND_MAKE_MEM_UNDEFINED(stk->data, sz);
+     VALGRIND_MAKE_MEM_UNDEFINED(stk->data + sizeof(stack_canary),
+                                 sz - sizeof(stack_canary));
#endif
}

@@ -110,6 +117,18 @@ free_stk(rust_task *task, stk_seg *stk) {
    task->free(stk);
}

+ static void
+ add_stack_canary(stk_seg *stk) {
+     memcpy(stk->data, stack_canary, sizeof(stack_canary));
+     assert(sizeof(stack_canary) == 16 && "Stack canary was not the expected size");
+ }
+
+ static void
+ check_stack_canary(stk_seg *stk) {
+     assert(!memcmp(stk->data, stack_canary, sizeof(stack_canary))
+            && "Somebody killed the canary");
+ }
+
static stk_seg*
new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
{
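For readers unfamiliar with the technique: the helpers added above stamp a fixed byte pattern at the low end of each stack segment and assert that the pattern is still intact before the segment is released, so an overflow that runs past the segment's limit is caught at teardown. Below is a minimal standalone sketch of that idea, using hypothetical demo_* names rather than the runtime's own types:

#include <cassert>
#include <cstdint>
#include <cstring>

// Fixed pattern written at the low end of the segment; a stack that grows
// down past its limit overwrites these bytes first.
static const uint8_t demo_canary[16] = {0xAB, 0xCD, 0xAB, 0xCD, 0xAB, 0xCD,
                                        0xAB, 0xCD, 0xAB, 0xCD, 0xAB, 0xCD,
                                        0xAB, 0xCD, 0xAB, 0xCD};

struct demo_stack {
    uint8_t data[4096];   // the stack grows downward toward data[0]
};

static void demo_add_canary(demo_stack *s) {
    std::memcpy(s->data, demo_canary, sizeof(demo_canary));
}

static void demo_check_canary(const demo_stack *s) {
    assert(!std::memcmp(s->data, demo_canary, sizeof(demo_canary))
           && "canary overwritten: something ran past the end of the stack");
}

int main() {
    demo_stack s;
    demo_add_canary(&s);
    // ... use the high end of s.data as a downward-growing stack ...
    demo_check_canary(&s);   // fires only if the guard bytes were clobbered
    return 0;
}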
@@ -151,6 +170,7 @@ new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
+     add_stack_canary(stk);
    stk->prev = NULL;
    stk->next = task->stk;
    stk->end = (uintptr_t) &stk->data[rust_stk_sz + RED_ZONE_SIZE];
@@ -165,6 +185,7 @@ static void
del_stk(rust_task *task, stk_seg *stk)
{
    assert(stk == task->stk && "Freeing stack segments out of order!");
+     check_stack_canary(stk);

    task->stk = stk->next;

@@ -268,25 +289,18 @@ struct rust_closure_env {
    type_desc *td;
};

- // This runs on the Rust stack
- extern "C" CDECL
- void task_start_wrapper(spawn_args *a)
- {
-     rust_task *task = a->task;
-     int rval = 42;
+ struct cleanup_args {
+     spawn_args *spargs;
+     bool failed;
+ };

-     bool failed = false;
-     try {
-         a->f(&rval, a->a3, a->a4);
-     } catch (rust_task *ex) {
-         A(task->sched, ex == task,
-           "Expected this task to be thrown for unwinding");
-         failed = true;
-     }
+ void
+ cleanup_task(cleanup_args *args) {
+     spawn_args *a = args->spargs;
+     bool failed = args->failed;
+     rust_task *task = a->task;

-     // We're on the Rust stack and the cycle collector may recurse arbitrarily
-     // deep, so switch to the C stack
-     task->sched->c_context.call_shim_on_c_stack(task, (void*)cc::do_cc);
+     cc::do_cc(task);

    rust_closure_env* env = (rust_closure_env*)a->a3;
    if (env) {
@@ -313,6 +327,29 @@ void task_start_wrapper(spawn_args *a)
        A(task->sched, false, "Shouldn't happen");
#endif
    }
+ }
+
+ // This runs on the Rust stack
+ extern "C" CDECL
+ void task_start_wrapper(spawn_args *a)
+ {
+     rust_task *task = a->task;
+     int rval = 42;
+
+     bool failed = false;
+     try {
+         a->f(&rval, a->a3, a->a4);
+     } catch (rust_task *ex) {
+         A(task->sched, ex == task,
+           "Expected this task to be thrown for unwinding");
+         failed = true;
+     }
+
+     cleanup_args ca = {a, failed};
+
+     // The cleanup work needs lots of stack
+     task->sched->c_context.call_shim_on_c_stack(&ca, (void*)cleanup_task);
+
    task->ctx.next->swap(task->ctx);
}

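The restructuring above keeps task_start_wrapper lean on the small Rust stack and hands the heavyweight cleanup (cycle collection, closure teardown) to the large C stack by packing its inputs into cleanup_args and routing the call through call_shim_on_c_stack. Below is a rough sketch of that bundle-and-shim pattern with hypothetical demo_* stand-ins; the real shim switches stacks before invoking the callback, which this sketch does not attempt:

#include <cstdio>

// Stand-in for the data handed across the stack switch.
struct demo_cleanup_args {
    int task_id;
    bool failed;
};

// Stand-in for call_shim_on_c_stack: the real shim swaps to a big C stack
// before calling fn(args); this demo simply calls it on the current stack.
static void demo_run_on_big_stack(void (*fn)(demo_cleanup_args *),
                                  demo_cleanup_args *args) {
    fn(args);
}

// The work that may need lots of stack lives behind the shim.
static void demo_cleanup_task(demo_cleanup_args *args) {
    std::printf("cleaning up task %d (failed=%d)\n",
                args->task_id, (int)args->failed);
}

int main() {
    demo_cleanup_args ca = {7, false};
    demo_run_on_big_stack(demo_cleanup_task, &ca);
    return 0;
}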
@@ -768,6 +805,11 @@ rust_task::on_rust_stack() {
    return sp_in_stk_seg(get_sp(), stk);
}

+ void
+ rust_task::check_stack_canary() {
+     ::check_stack_canary(stk);
+ }
+
//
// Local Variables:
// mode: C++