Commit 0c98625

Revert "sched, cgroup: replace signal_struct->group_rwsem with a global percpu_rwsem"
This reverts commit d59cfc0.

d59cfc0 ("sched, cgroup: replace signal_struct->group_rwsem with a global
percpu_rwsem") and b5ba75b ("cgroup: simplify threadgroup locking") changed
how cgroup synchronizes against task fork and exit so that it uses a global
percpu_rwsem instead of a per-process rwsem; unfortunately, the write
[un]lock paths of percpu_rwsem always involve synchronize_rcu_expedited(),
which turned out to be too expensive.

Improvements for percpu_rwsem are scheduled to be merged in the coming
v4.4-rc1 merge window; they should alleviate this issue. For now, revert
the two commits to restore the per-process rwsem. They will be re-applied
for the v4.4-rc1 merge window.

Signed-off-by: Tejun Heo <[email protected]>
Link: http://lkml.kernel.org/g/[email protected]
Reported-by: Christian Borntraeger <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: [email protected] # v4.2+
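
For context, the two schemes differ only in which lock the fork/exit bracketing
helpers take. A minimal side-by-side sketch (illustrative only, not a verbatim
kernel excerpt; the USE_GLOBAL_PERCPU_RWSEM switch is invented here just to show
both variants in one place):

#ifdef USE_GLOBAL_PERCPU_RWSEM		/* the scheme being reverted */
/* One percpu_rwsem shared by every process: the read side taken at fork/exit
 * is nearly free, but every write lock/unlock taken for a cgroup migration
 * drags the system through synchronize_rcu_expedited(). */
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
#else					/* the per-process scheme restored here */
/* An ordinary rw_semaphore embedded in each signal_struct: a writer only
 * serializes against fork/exit inside the one threadgroup being migrated
 * and pays no RCU grace-period cost. */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}
#endif
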
1 parent f9f9e7b commit 0c98625

5 files changed, 83 additions and 45 deletions

include/linux/cgroup-defs.h

Lines changed: 2 additions & 25 deletions
@@ -473,31 +473,8 @@ struct cgroup_subsys {
 	unsigned int depends_on;
 };
 
-extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
-/**
- * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
- */
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
-	percpu_down_read(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_end(). Counterpart of
- * cgroup_threadcgroup_change_begin().
- */
-static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
-	percpu_up_read(&cgroup_threadgroup_rwsem);
-}
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
 
 #else /* CONFIG_CGROUPS */
 
include/linux/init_task.h

Lines changed: 8 additions & 0 deletions
@@ -25,6 +25,13 @@
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig) \
+	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
 #ifdef CONFIG_CPUSETS
 #define INIT_CPUSET_SEQ(tsk) \
 	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
 	INIT_PREV_CPUTIME(sig) \
 	.cred_guard_mutex = \
 		__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
+	INIT_GROUP_RWSEM(sig) \
 }
 
 extern struct nsproxy init_nsproxy;

include/linux/sched.h

Lines changed: 12 additions & 0 deletions
@@ -762,6 +762,18 @@ struct signal_struct {
 	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+	/*
+	 * group_rwsem prevents new tasks from entering the threadgroup and
+	 * member tasks from exiting,a more specifically, setting of
+	 * PF_EXITING.  fork and exit paths are protected with this rwsem
+	 * using threadgroup_change_begin/end().  Users which require
+	 * threadgroup to remain stable should use threadgroup_[un]lock()
+	 * which also takes care of exec path.  Currently, cgroup is the
+	 * only user.
+	 */
+	struct rw_semaphore group_rwsem;
+#endif
 
 	oom_flags_t oom_flags;
 	short oom_score_adj;		/* OOM kill score adjustment */
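
The restored comment states the contract in terms of PF_EXITING. On the exit
side, the read lock brackets exactly that transition; roughly (a paraphrased
sketch of what exit_signals() does, with the signal bookkeeping elided and the
function renamed to mark it as illustrative):

/* Paraphrased sketch of the exit-side bracketing; not verbatim kernel code. */
static void exit_signals_sketch(struct task_struct *tsk)
{
	/* Read side of group_rwsem, via threadgroup_change_begin(): a cgroup
	 * migration holding the write side can never observe PF_EXITING
	 * appearing in the middle of its operation. */
	threadgroup_change_begin(tsk);

	/* ... per-thread signal bookkeeping elided ... */

	tsk->flags |= PF_EXITING;	/* the task now counts as exiting */

	threadgroup_change_end(tsk);
}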

kernel/cgroup.c

Lines changed: 57 additions & 20 deletions
@@ -46,7 +46,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
-#include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);
 
-struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
 #define cgroup_assert_mutex_or_rcu_locked() \
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
 			   !lockdep_is_held(&cgroup_mutex), \
@@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	return cset;
 }
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	down_read(&tsk->signal->group_rwsem);
+}
+
+void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+	up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid.  This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
+ *
+ * fork and exit explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static void threadgroup_lock(struct task_struct *tsk)
+{
+	down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+	up_write(&tsk->signal->group_rwsem);
+}
+
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
 	struct cgroup *root_cgrp = kf_root->kn->priv;
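
For orientation before the call-site hunks that follow: with the helpers above
restored, the write-side pattern used by the migration paths boils down to the
snippet below (condensed, with an invented wrapper name; the real call sites,
including the retry against de_thread() races, appear in the next hunks):

/* Condensed write-side pattern; the actual code lives in
 * __cgroup_procs_write() and cgroup_update_dfl_csses() below. */
static int attach_with_threadgroup_locked(struct cgroup *dst_cgrp,
					  struct task_struct *tsk,
					  bool threadgroup)
{
	int ret;

	threadgroup_lock(tsk);		/* down_write(&tsk->signal->group_rwsem) */

	/* With the write lock held, no thread of @tsk can fork into or exit
	 * out of the group, so cgroup_attach_task() sees a stable set. */
	ret = cgroup_attach_task(dst_cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);	/* up_write(&tsk->signal->group_rwsem) */
	return ret;
}
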
@@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 	lockdep_assert_held(&css_set_rwsem);
 
 	/*
-	 * We are synchronized through cgroup_threadgroup_rwsem against
-	 * PF_EXITING setting such that we can't race against cgroup_exit()
-	 * changing the css_set to init_css_set and dropping the old one.
+	 * We are synchronized through threadgroup_lock() against PF_EXITING
+	 * setting such that we can't race against cgroup_exit() changing the
+	 * css_set to init_css_set and dropping the old one.
 	 */
 	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	old_cset = task_css_set(tsk);
@@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
  * @src_cset and add it to @preloaded_csets, which should later be cleaned
  * up by cgroup_migrate_finish().
  *
- * This function may be called without holding cgroup_threadgroup_rwsem
- * even if the target is a process.  Threads may be created and destroyed
- * but as long as cgroup_mutex is not dropped, no new css_set can be put
- * into play and the preloaded css_sets are guaranteed to cover all
- * migrations.
+ * This function may be called without holding threadgroup_lock even if the
+ * target is a process.  Threads may be created and destroyed but as long
+ * as cgroup_mutex is not dropped, no new css_set can be put into play and
+ * the preloaded css_sets are guaranteed to cover all migrations.
  */
 static void cgroup_migrate_add_src(struct css_set *src_cset,
 				   struct cgroup *dst_cgrp,
@@ -2240,7 +2278,7 @@ static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
  * @threadgroup: whether @leader points to the whole process or a single task
  *
  * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
- * process, the caller must be holding cgroup_threadgroup_rwsem.  The
+ * process, the caller must be holding threadgroup_lock of @leader.  The
  * caller is also responsible for invoking cgroup_migrate_add_src() and
  * cgroup_migrate_prepare_dst() on the targets before invoking this
  * function and following up with cgroup_migrate_finish().
@@ -2368,7 +2406,7 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
  * @leader: the task or the leader of the threadgroup to be attached
  * @threadgroup: attach the whole threadgroup?
  *
- * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
+ * Call holding cgroup_mutex and threadgroup_lock of @leader.
  */
 static int cgroup_attach_task(struct cgroup *dst_cgrp,
 			      struct task_struct *leader, bool threadgroup)
@@ -2490,7 +2528,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	get_task_struct(tsk);
 	rcu_read_unlock();
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	threadgroup_lock(tsk);
 	if (threadgroup) {
 		if (!thread_group_leader(tsk)) {
 			/*
@@ -2500,7 +2538,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 			 * try again; this is
 			 * "double-double-toil-and-trouble-check locking".
 			 */
-			percpu_up_write(&cgroup_threadgroup_rwsem);
+			threadgroup_unlock(tsk);
 			put_task_struct(tsk);
 			goto retry_find_task;
 		}
@@ -2510,7 +2548,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	if (!ret)
 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);
 
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	threadgroup_unlock(tsk);
 
 	put_task_struct(tsk);
 out_unlock_cgroup:
@@ -2713,17 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 			goto out_finish;
 		last_task = task;
 
-		percpu_down_write(&cgroup_threadgroup_rwsem);
+		threadgroup_lock(task);
 		/* raced against de_thread() from another thread? */
 		if (!thread_group_leader(task)) {
-			percpu_up_write(&cgroup_threadgroup_rwsem);
+			threadgroup_unlock(task);
 			put_task_struct(task);
 			continue;
 		}
 
 		ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
 
-		percpu_up_write(&cgroup_threadgroup_rwsem);
+		threadgroup_unlock(task);
 		put_task_struct(task);
 
 		if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -5045,7 +5083,6 @@ int __init cgroup_init(void)
 	unsigned long key;
 	int ssid, err;
 
-	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
kernel/fork.c

Lines changed: 4 additions & 0 deletions
@@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	tty_audit_fork(sig);
 	sched_autogroup_fork(sig);
 
+#ifdef CONFIG_CGROUPS
+	init_rwsem(&sig->group_rwsem);
+#endif
+
 	sig->oom_score_adj = current->signal->oom_score_adj;
 	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
 
