Skip to content

Commit d3f793a

Browse files
torvalds authored and jfvogel committed
mm: get rid of vmacache_flush_all() entirely
Jann Horn points out that the vmacache_flush_all() function is not only potentially expensive, it's buggy too. It also happens to be entirely unnecessary, because the sequence number overflow case can be avoided by simply making the sequence number be 64-bit. That doesn't even grow the data structures in question, because the other adjacent fields are already 64-bit. So simplify the whole thing by just making the sequence number overflow case go away entirely, which gets rid of all the complications and makes the code faster too. Win-win. [ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics also just goes away entirely with this ] Reported-by: Jann Horn <[email protected]> Suggested-by: Will Deacon <[email protected]> Acked-by: Davidlohr Bueso <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]> (cherry picked from commit 7a9cdeb) Conflicts: include/linux/mm_types.h mm/debug.c Orabug: 28693570 CVE: CVE-2018-17182 Reviewed-by: Khalid Aziz <[email protected]> Signed-off-by: Allen Pais <[email protected]>
1 parent ae95b11 commit d3f793a

File tree

6 files changed

+10
-46
lines changed

6 files changed

+10
-46
lines changed

include/linux/mm_types.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -359,7 +359,11 @@ struct kioctx_table;
359359
struct mm_struct {
360360
struct vm_area_struct *mmap; /* list of VMAs */
361361
struct rb_root mm_rb;
362+
#ifndef __GENKSYMS__
363+
u64 vmacache_seqnum;
364+
#else
362365
u32 vmacache_seqnum; /* per-thread vmacache */
366+
#endif
363367
#ifdef CONFIG_MMU
364368
unsigned long (*get_unmapped_area) (struct file *filp,
365369
unsigned long addr, unsigned long len,

include/linux/mm_types_task.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,11 @@
3232
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
3333

3434
struct vmacache {
35+
#ifndef __GENKSYMS__
36+
u64 seqnum;
37+
#else
3538
u32 seqnum;
39+
#endif
3640
struct vm_area_struct *vmas[VMACACHE_SIZE];
3741
};
3842

include/linux/vm_event_item.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
105105
#ifdef CONFIG_DEBUG_VM_VMACACHE
106106
VMACACHE_FIND_CALLS,
107107
VMACACHE_FIND_HITS,
108-
VMACACHE_FULL_FLUSHES,
109108
#endif
110109
#ifdef CONFIG_SWAP
111110
SWAP_RA,

include/linux/vmacache.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
1616
memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
1717
}
1818

19-
extern void vmacache_flush_all(struct mm_struct *mm);
2019
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
2120
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
2221
unsigned long addr);
@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
3029
static inline void vmacache_invalidate(struct mm_struct *mm)
3130
{
3231
mm->vmacache_seqnum++;
33-
34-
/* deal with overflows */
35-
if (unlikely(mm->vmacache_seqnum == 0))
36-
vmacache_flush_all(mm);
3732
}
3833

3934
#endif /* __LINUX_VMACACHE_H */

mm/debug.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(dump_vma);
100100

101101
void dump_mm(const struct mm_struct *mm)
102102
{
103-
pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
103+
pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
104104
#ifdef CONFIG_MMU
105105
"get_unmapped_area %p\n"
106106
#endif
@@ -128,7 +128,7 @@ void dump_mm(const struct mm_struct *mm)
128128
"tlb_flush_pending %d\n"
129129
"def_flags: %#lx(%pGv)\n",
130130

131-
mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
131+
mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
132132
#ifdef CONFIG_MMU
133133
mm->get_unmapped_area,
134134
#endif

mm/vmacache.c

Lines changed: 0 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -7,44 +7,6 @@
77
#include <linux/mm.h>
88
#include <linux/vmacache.h>
99

10-
/*
11-
* Flush vma caches for threads that share a given mm.
12-
*
13-
* The operation is safe because the caller holds the mmap_sem
14-
* exclusively and other threads accessing the vma cache will
15-
* have mmap_sem held at least for read, so no extra locking
16-
* is required to maintain the vma cache.
17-
*/
18-
void vmacache_flush_all(struct mm_struct *mm)
19-
{
20-
struct task_struct *g, *p;
21-
22-
count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
23-
24-
/*
25-
* Single threaded tasks need not iterate the entire
26-
* list of process. We can avoid the flushing as well
27-
* since the mm's seqnum was increased and don't have
28-
* to worry about other threads' seqnum. Current's
29-
* flush will occur upon the next lookup.
30-
*/
31-
if (atomic_read(&mm->mm_users) == 1)
32-
return;
33-
34-
rcu_read_lock();
35-
for_each_process_thread(g, p) {
36-
/*
37-
* Only flush the vmacache pointers as the
38-
* mm seqnum is already set and curr's will
39-
* be set upon invalidation when the next
40-
* lookup is done.
41-
*/
42-
if (mm == p->mm)
43-
vmacache_flush(p);
44-
}
45-
rcu_read_unlock();
46-
}
47-
4810
/*
4911
* This task may be accessing a foreign mm via (for example)
5012
* get_user_pages()->find_vma(). The vmacache is task-local and this

0 commit comments

Comments (0)