@@ -211,9 +211,8 @@ static int free_vm_stack_cache(unsigned int cpu)
 	return 0;
 }
 
-static int memcg_charge_kernel_stack(struct task_struct *tsk)
+static int memcg_charge_kernel_stack(struct vm_struct *vm)
 {
-	struct vm_struct *vm = task_stack_vm_area(tsk);
 	int i;
 	int ret;
 
@@ -239,6 +238,7 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk)
 
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
+	struct vm_struct *vm;
 	void *stack;
 	int i;
 
@@ -256,7 +256,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 		/* Clear stale pointers from reused stack. */
 		memset(s->addr, 0, THREAD_SIZE);
 
-		if (memcg_charge_kernel_stack(tsk)) {
+		if (memcg_charge_kernel_stack(s)) {
 			vfree(s->addr);
 			return -ENOMEM;
 		}
@@ -279,7 +279,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	if (!stack)
 		return -ENOMEM;
 
-	if (memcg_charge_kernel_stack(tsk)) {
+	vm = find_vm_area(stack);
+	if (memcg_charge_kernel_stack(vm)) {
 		vfree(stack);
 		return -ENOMEM;
 	}
@@ -288,19 +289,15 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * free_thread_stack() can be called in interrupt context,
 	 * so cache the vm_struct.
 	 */
-	tsk->stack_vm_area = find_vm_area(stack);
+	tsk->stack_vm_area = vm;
 	tsk->stack = stack;
 	return 0;
 }
 
 static void free_thread_stack(struct task_struct *tsk)
 {
-	struct vm_struct *vm = task_stack_vm_area(tsk);
 	int i;
 
-	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-		memcg_kmem_uncharge_page(vm->pages[i], 0);
-
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
 		if (this_cpu_cmpxchg(cached_stacks[i], NULL,
 				     tsk->stack_vm_area) != NULL)
@@ -454,12 +451,25 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 	}
 }
 
+void exit_task_stack_account(struct task_struct *tsk)
+{
+	account_kernel_stack(tsk, -1);
+
+	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+		struct vm_struct *vm;
+		int i;
+
+		vm = task_stack_vm_area(tsk);
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
+			memcg_kmem_uncharge_page(vm->pages[i], 0);
+	}
+}
+
 static void release_task_stack(struct task_struct *tsk)
 {
 	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
 		return;  /* Better to leak the stack than to free prematurely */
 
-	account_kernel_stack(tsk, -1);
 	free_thread_stack(tsk);
 }
 
@@ -918,6 +928,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	refcount_set(&tsk->stack_refcount, 1);
 #endif
+	account_kernel_stack(tsk, 1);
 
 	err = scs_prepare(tsk, node);
 	if (err)
@@ -961,8 +972,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->wake_q.next = NULL;
 	tsk->worker_private = NULL;
 
-	account_kernel_stack(tsk, 1);
-
 	kcov_task_init(tsk);
 	kmap_local_fork(tsk);
 
@@ -981,6 +990,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	return tsk;
 
 free_stack:
+	exit_task_stack_account(tsk);
 	free_thread_stack(tsk);
 free_tsk:
 	free_task_struct(tsk);
@@ -2459,6 +2469,7 @@ static __latent_entropy struct task_struct *copy_process(
 	exit_creds(p);
 bad_fork_free:
 	WRITE_ONCE(p->__state, TASK_DEAD);
+	exit_task_stack_account(p);
 	put_task_stack(p);
 	delayed_free_task(p);
 fork_out:
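
Summary of the change, with a standalone sketch: memcg_charge_kernel_stack() now takes the vm_struct backing the stack instead of deriving it from the task, the per-page memcg uncharge moves out of free_thread_stack() into the new exit_task_stack_account() helper (together with account_kernel_stack(tsk, -1)), and the fork error paths call that helper before the stack itself is freed. The fragment below is only a rough userspace mock of that ordering, not kernel code; the stub struct definitions, the page constants, and the printf bodies are invented for illustration.

/*
 * Userspace mock of the charge/uncharge ordering set up by this diff.
 * Nothing here is kernel code: vm_struct, task_struct and the helper
 * bodies are stand-ins so the control flow compiles as a normal program.
 */
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 16384
#define PAGE_SIZE    4096
#define STACK_PAGES (THREAD_SIZE / PAGE_SIZE)

struct vm_struct { void *pages[STACK_PAGES]; };
struct task_struct { struct vm_struct *stack_vm_area; void *stack; };

/* New signature: the caller hands in the vm_struct it already has. */
static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	for (int i = 0; i < STACK_PAGES; i++)
		printf("charge stack page %d (%p)\n", i, vm->pages[i]);
	return 0;
}

/* New helper: drop the stack accounting and the per-page memcg charge. */
static void exit_task_stack_account(struct task_struct *tsk)
{
	printf("account_kernel_stack(tsk, -1)\n");
	for (int i = 0; i < STACK_PAGES; i++)
		printf("uncharge stack page %d (%p)\n",
		       i, tsk->stack_vm_area->pages[i]);
}

int main(void)
{
	struct vm_struct vm = { .pages = { (void *)0x1000, (void *)0x2000,
					   (void *)0x3000, (void *)0x4000 } };
	struct task_struct tsk = { .stack_vm_area = &vm, .stack = vm.pages[0] };

	/* alloc_thread_stack_node(): charge against the vm_struct directly. */
	if (memcg_charge_kernel_stack(&vm))
		return EXIT_FAILURE;

	/* dup_task_struct(): group accounting right after the stack is set up. */
	printf("account_kernel_stack(tsk, 1)\n");

	/* Error or exit path: undo both before the stack pages go away. */
	exit_task_stack_account(&tsk);
	return EXIT_SUCCESS;
}

Compiled with a plain cc invocation this just prints the charge/uncharge order; in the diff itself the uncharge side is invoked from the free_stack and bad_fork_free error labels, while the regular exit-path caller of exit_task_stack_account() lies outside the hunks shown here.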