@@ -211,6 +211,32 @@ static int free_vm_stack_cache(unsigned int cpu)
 	return 0;
 }
 
+static int memcg_charge_kernel_stack(struct task_struct *tsk)
+{
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+	int i;
+	int ret;
+
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
+	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+
+	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
+		if (ret)
+			goto err;
+	}
+	return 0;
+err:
+	/*
+	 * If memcg_kmem_charge_page() fails, page's memory cgroup pointer is
+	 * NULL, and memcg_kmem_uncharge_page() in free_thread_stack() will
+	 * ignore this page.
+	 */
+	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
+		memcg_kmem_uncharge_page(vm->pages[i], 0);
+	return ret;
+}
+
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
 	void *stack;
@@ -230,6 +256,11 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 		/* Clear stale pointers from reused stack. */
 		memset(s->addr, 0, THREAD_SIZE);
 
+		if (memcg_charge_kernel_stack(tsk)) {
+			vfree(s->addr);
+			return -ENOMEM;
+		}
+
 		tsk->stack_vm_area = s;
 		tsk->stack = s->addr;
 		return 0;
@@ -247,6 +278,11 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 			     0, node, __builtin_return_address(0));
 	if (!stack)
 		return -ENOMEM;
+
+	if (memcg_charge_kernel_stack(tsk)) {
+		vfree(stack);
+		return -ENOMEM;
+	}
 	/*
 	 * We can't call find_vm_area() in interrupt context, and
 	 * free_thread_stack() can be called in interrupt context,
@@ -418,36 +454,6 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 	}
 }
 
-static int memcg_charge_kernel_stack(struct task_struct *tsk)
-{
-#ifdef CONFIG_VMAP_STACK
-	struct vm_struct *vm = task_stack_vm_area(tsk);
-	int ret;
-
-	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
-
-	if (vm) {
-		int i;
-
-		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
-
-		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-			/*
-			 * If memcg_kmem_charge_page() fails, page's
-			 * memory cgroup pointer is NULL, and
-			 * memcg_kmem_uncharge_page() in free_thread_stack()
-			 * will ignore this page.
-			 */
-			ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
-						     0);
-			if (ret)
-				return ret;
-		}
-	}
-#endif
-	return 0;
-}
-
 static void release_task_stack(struct task_struct *tsk)
 {
 	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
@@ -909,9 +915,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (err)
 		goto free_tsk;
 
-	if (memcg_charge_kernel_stack(tsk))
-		goto free_stack;
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	refcount_set(&tsk->stack_refcount, 1);
 #endif