Commit 31a78f2

Balbir Singh authored and torvalds committed
mm owner: fix race between swapoff and exit
There's a race between mm->owner assignment and swapoff, more easily seen when task slab poisoning is turned on. The condition occurs when try_to_unuse() runs in parallel with an exiting task. A similar race can occur with callers of get_task_mm(), such as /proc/<pid>/<mmstats> or ptrace or page migration.

    CPU0                                    CPU1
                                            try_to_unuse
                                            looks at mm = task0->mm
                                            increments mm->mm_users
    task 0 exits
    mm->owner needs to be updated, but no
    new owner is found (mm_users > 1, but
    no other task has task->mm = task0->mm)
    mm_update_next_owner() leaves
                                            mmput(mm) decrements mm->mm_users
    task0 freed
    dereferencing mm->owner fails

The fix is to notify the subsystem via mm_owner_changed callback(), if no new owner is found, by specifying the new task as NULL.

Jiri Slaby: mm->owner was set to NULL prior to calling cgroup_mm_owner_callbacks(), but must be set after that, so as not to pass NULL as old owner causing oops.

Daisuke Nishimura: mm_update_next_owner() may set mm->owner to NULL, but mem_cgroup_from_task() and its callers need to take account of this situation to avoid oops.

Hugh Dickins: Lockdep warning and hang below exec_mmap() when testing these patches. exit_mm() up_reads mmap_sem before calling mm_update_next_owner(), so exec_mmap() now needs to do the same. And with that repositioning, there's now no point in mm_need_new_owner() allowing for NULL mm.

Reported-by: Hugh Dickins <[email protected]>
Signed-off-by: Balbir Singh <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Signed-off-by: Daisuke Nishimura <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Paul Menage <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
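To make the race above concrete, here is a minimal sketch of the get_task_mm()/mmput() pattern on the swapoff side described in the message. The function name is made up and this is not code from the patch.

/* Illustrative sketch only -- not part of this commit. */
static void sketch_pin_and_walk_mm(struct task_struct *task0)
{
	struct mm_struct *mm;

	mm = get_task_mm(task0);	/* CPU1: bumps mm->mm_users */
	if (!mm)
		return;
	/*
	 * If task0 exits at this point, mm_users is still > 1 but no
	 * surviving task has task->mm == mm, so (before this patch)
	 * mm_update_next_owner() gives up and mm->owner keeps pointing
	 * at the soon-to-be-freed task0.
	 */
	/* ... walk the mm, as try_to_unuse() does during swapoff ... */
	mmput(mm);			/* CPU1: drops our mm_users reference */
}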
1 parent bf5cb66 commit 31a78f2

4 files changed, +31 -5 lines changed


fs/exec.c

Lines changed: 1 addition & 1 deletion
@@ -752,11 +752,11 @@ static int exec_mmap(struct mm_struct *mm)
 	tsk->active_mm = mm;
 	activate_mm(active_mm, mm);
 	task_unlock(tsk);
-	mm_update_next_owner(old_mm);
 	arch_pick_mmap_layout(mm);
 	if (old_mm) {
 		up_read(&old_mm->mmap_sem);
 		BUG_ON(active_mm != old_mm);
+		mm_update_next_owner(old_mm);
 		mmput(old_mm);
 		return 0;
 	}
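Hugh's note in the commit message is what this hunk addresses. A rough ordering sketch follows, assuming (per the kernel/exit.c hunk below) that mm_update_next_owner() now takes mm->mmap_sem for write; the function name is hypothetical.

/* Ordering sketch only -- not code from the patch. */
static void exec_mmap_tail_sketch(struct mm_struct *old_mm)
{
	/*
	 * mm_update_next_owner() now does down_write(&mm->mmap_sem)
	 * internally, so calling it while this task still holds
	 * old_mm->mmap_sem for read would deadlock on the rwsem --
	 * the lockdep warning and hang reported below exec_mmap().
	 */
	up_read(&old_mm->mmap_sem);	/* drop the read hold first */
	mm_update_next_owner(old_mm);	/* its internal down_write() is now safe */
	mmput(old_mm);
}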

kernel/cgroup.c

Lines changed: 3 additions & 2 deletions
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
-	struct cgroup *oldcgrp, *newcgrp;
+	struct cgroup *oldcgrp, *newcgrp = NULL;
 
 	if (need_mm_owner_callback) {
 		int i;
 		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
 			struct cgroup_subsys *ss = subsys[i];
 			oldcgrp = task_cgroup(old, ss->subsys_id);
-			newcgrp = task_cgroup(new, ss->subsys_id);
+			if (new)
+				newcgrp = task_cgroup(new, ss->subsys_id);
 			if (oldcgrp == newcgrp)
 				continue;
 			if (ss->mm_owner_changed)
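With newcgrp now defaulting to NULL, a subsystem's ->mm_owner_changed() hook can be invoked with a NULL new cgroup when an mm loses its owner. The sketch below is hypothetical: the handler and its three-argument ss/old/new shape are assumed from how cgroup_mm_owner_callbacks() is written here, not taken from the patch.

/* Hypothetical subsystem hook -- illustration only, signature assumed. */
static void example_mm_owner_changed(struct cgroup_subsys *ss,
				     struct cgroup *old, struct cgroup *new)
{
	if (!new) {
		/* the mm lost its owner (exit raced with swapoff and co.);
		 * park or drop any per-owner state tied to 'old' */
		return;
	}
	/* normal case: move per-mm accounting from 'old' to 'new' */
}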

kernel/exit.c

Lines changed: 10 additions & 2 deletions
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
 	 * If there are other users of the mm and the owner (us) is exiting
 	 * we need to find a new owner to take on the responsibility.
 	 */
-	if (!mm)
-		return 0;
 	if (atomic_read(&mm->mm_users) <= 1)
 		return 0;
 	if (mm->owner != p)
@@ -627,6 +625,16 @@ void mm_update_next_owner(struct mm_struct *mm)
 	} while_each_thread(g, c);
 
 	read_unlock(&tasklist_lock);
+	/*
+	 * We found no owner yet mm_users > 1: this implies that we are
+	 * most likely racing with swapoff (try_to_unuse()) or /proc or
+	 * ptrace or page migration (get_task_mm()). Mark owner as NULL,
+	 * so that subsystems can understand the callback and take action.
+	 */
+	down_write(&mm->mmap_sem);
+	cgroup_mm_owner_callbacks(mm->owner, NULL);
+	mm->owner = NULL;
+	up_write(&mm->mmap_sem);
 	return;
 
 assign_new_owner:

mm/memcontrol.c

Lines changed: 17 additions & 0 deletions
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
+	/*
+	 * mm_update_next_owner() may clear mm->owner to NULL
+	 * if it races with swapoff, page migration, etc.
+	 * So this can be called with p == NULL.
+	 */
+	if (unlikely(!p))
+		return NULL;
+
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	if (likely(!memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem)) {
+			rcu_read_unlock();
+			kmem_cache_free(page_cgroup_cache, pc);
+			return 0;
+		}
 		/*
 		 * For every charge from the cgroup, increment reference count
 		 */
@@ -801,6 +814,10 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (unlikely(!mem)) {
+		rcu_read_unlock();
+		return 0;
+	}
 	css_get(&mem->css);
 	rcu_read_unlock();
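Taken together, these hunks establish a caller-side pattern for resolving mm->owner to a mem_cgroup under RCU; a minimal sketch of that pattern is below. The helper name is invented; the calls mirror the mem_cgroup_shrink_usage() hunk above.

/* Sketch of the caller-side pattern -- not a function added by this patch. */
static struct mem_cgroup *example_mem_cgroup_get(struct mm_struct *mm)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		/* mm->owner was cleared by mm_update_next_owner() */
		rcu_read_unlock();
		return NULL;
	}
	css_get(&mem->css);	/* pin the cgroup before leaving the RCU section */
	rcu_read_unlock();
	return mem;
}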
