@@ -2402,14 +2402,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
         if (!cgrp)
                 return -ENODEV;
 
-retry_find_task:
+        percpu_down_write(&cgroup_threadgroup_rwsem);
         rcu_read_lock();
         if (pid) {
                 tsk = find_task_by_vpid(pid);
                 if (!tsk) {
-                        rcu_read_unlock();
                         ret = -ESRCH;
-                        goto out_unlock_cgroup;
+                        goto out_unlock_rcu;
                 }
                 /*
                  * even if we're attaching all tasks in the thread group, we
@@ -2419,9 +2418,8 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
                     !uid_eq(cred->euid, tcred->uid) &&
                     !uid_eq(cred->euid, tcred->suid)) {
-                        rcu_read_unlock();
                         ret = -EACCES;
-                        goto out_unlock_cgroup;
+                        goto out_unlock_rcu;
                 }
         } else
                 tsk = current;
@@ -2436,35 +2434,21 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
          */
         if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
                 ret = -EINVAL;
-                rcu_read_unlock();
-                goto out_unlock_cgroup;
+                goto out_unlock_rcu;
         }
 
         get_task_struct(tsk);
         rcu_read_unlock();
 
-        percpu_down_write(&cgroup_threadgroup_rwsem);
-        if (threadgroup) {
-                if (!thread_group_leader(tsk)) {
-                        /*
-                         * a race with de_thread from another thread's exec()
-                         * may strip us of our leadership, if this happens,
-                         * there is no choice but to throw this task away and
-                         * try again; this is
-                         * "double-double-toil-and-trouble-check locking".
-                         */
-                        percpu_up_write(&cgroup_threadgroup_rwsem);
-                        put_task_struct(tsk);
-                        goto retry_find_task;
-                }
-        }
-
         ret = cgroup_attach_task(cgrp, tsk, threadgroup);
 
-        percpu_up_write(&cgroup_threadgroup_rwsem);
-
         put_task_struct(tsk);
-out_unlock_cgroup:
+        goto out_unlock_threadgroup;
+
+out_unlock_rcu:
+        rcu_read_unlock();
+out_unlock_threadgroup:
+        percpu_up_write(&cgroup_threadgroup_rwsem);
         cgroup_kn_unlock(of->kn);
         return ret ?: nbytes;
 }
@@ -2611,6 +2595,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
         lockdep_assert_held(&cgroup_mutex);
 
+        percpu_down_write(&cgroup_threadgroup_rwsem);
+
         /* look up all csses currently attached to @cgrp's subtree */
         down_read(&css_set_rwsem);
         css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2666,17 +2652,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                         goto out_finish;
                 last_task = task;
 
-                percpu_down_write(&cgroup_threadgroup_rwsem);
-                /* raced against de_thread() from another thread? */
-                if (!thread_group_leader(task)) {
-                        percpu_up_write(&cgroup_threadgroup_rwsem);
-                        put_task_struct(task);
-                        continue;
-                }
-
                 ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
 
-                percpu_up_write(&cgroup_threadgroup_rwsem);
                 put_task_struct(task);
 
                 if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2686,6 +2663,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 
 out_finish:
         cgroup_migrate_finish(&preloaded_csets);
+        percpu_up_write(&cgroup_threadgroup_rwsem);
         return ret;
 }
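For reference, a minimal userspace sketch of the locking order this change establishes in __cgroup_procs_write: the threadgroup lock is taken for write once, before the task lookup, and error paths unwind through labels instead of dropping the lock and retrying. This is only an illustration under stated assumptions: a pthread rwlock stands in for the percpu cgroup_threadgroup_rwsem, and lookup_task()/attach_task() are hypothetical stand-ins, not kernel APIs.

/*
 * Userspace sketch of the lock-before-lookup pattern; not kernel code.
 * pthread_rwlock_t substitutes for the percpu rwsem, and the two helpers
 * below are invented placeholders for find_task_by_vpid()/cgroup_attach_task().
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t threadgroup_lock = PTHREAD_RWLOCK_INITIALIZER;

/* placeholder lookup: "finds" any positive pid */
static int lookup_task(int pid)
{
        return pid > 0 ? pid : -1;
}

/* placeholder attach: always succeeds */
static int attach_task(int task)
{
        (void)task;
        return 0;
}

static int procs_write(int pid)
{
        int task, ret;

        /* take the write lock up front, before the lookup */
        pthread_rwlock_wrlock(&threadgroup_lock);

        task = lookup_task(pid);
        if (task < 0) {
                ret = -ESRCH;
                goto out_unlock;        /* mirrors out_unlock_rcu/out_unlock_threadgroup */
        }

        ret = attach_task(task);
out_unlock:
        pthread_rwlock_unlock(&threadgroup_lock);
        return ret;
}

int main(void)
{
        printf("attach pid 42: %d\n", procs_write(42));
        printf("attach bad pid: %d\n", procs_write(-1));
        return 0;
}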