Commit 7a9cdeb

mm: get rid of vmacache_flush_all() entirely
Jann Horn points out that the vmacache_flush_all() function is not only potentially expensive, it's buggy too. It also happens to be entirely unnecessary, because the sequence number overflow case can be avoided by simply making the sequence number be 64-bit. That doesn't even grow the data structures in question, because the other adjacent fields are already 64-bit.

So simplify the whole thing by just making the sequence number overflow case go away entirely, which gets rid of all the complications and makes the code faster too. Win-win.

[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics also just goes away entirely with this ]

Reported-by: Jann Horn <[email protected]>
Suggested-by: Will Deacon <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 54eda9d commit 7a9cdeb
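
Purely as an illustration (not part of the commit): with the sequence number widened to 64 bits, the invalidation helper in include/linux/vmacache.h below reduces to a bare increment. The sketch restates that helper with a back-of-the-envelope comment on why wraparound is no longer a practical concern; the field and function names are taken from the diff, everything around them is elided.

/*
 * Sketch of the simplified helper this commit leaves behind
 * (see the include/linux/vmacache.h hunk below).
 */
static inline void vmacache_invalidate(struct mm_struct *mm)
{
	/*
	 * A 64-bit counter bumped once per invalidation cannot
	 * realistically wrap: even at one increment per nanosecond,
	 * 2^64 increments take roughly 584 years, so the old
	 * "seqnum == 0" overflow path and vmacache_flush_all()
	 * can simply be deleted.
	 */
	mm->vmacache_seqnum++;
}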

File tree

6 files changed: +4 −48 lines

include/linux/mm_types.h

Lines changed: 1 addition & 1 deletion

@@ -341,7 +341,7 @@ struct mm_struct {
 	struct {
 		struct vm_area_struct *mmap;		/* list of VMAs */
 		struct rb_root mm_rb;
-		u32 vmacache_seqnum;                   /* per-thread vmacache */
+		u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
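
As a hedged aside (a toy userspace example, not the kernel's mm_struct): on an LP64 target a u32 sandwiched between pointer-sized members already occupies 8 bytes once alignment padding is counted, which is why widening vmacache_seqnum to u64 does not grow the structure, exactly as the commit message claims.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for a 32-bit vs. 64-bit sequence number between
 * pointer-sized neighbours. */
struct with_u32 { void *prev; uint32_t seqnum; void *next; };
struct with_u64 { void *prev; uint64_t seqnum; void *next; };

int main(void)
{
	printf("u32 variant: %zu bytes, u64 variant: %zu bytes\n",
	       sizeof(struct with_u32), sizeof(struct with_u64));
	return 0;	/* both report 24 on typical 64-bit ABIs */
}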

include/linux/mm_types_task.h

Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)

 struct vmacache {
-	u32 seqnum;
+	u64 seqnum;
 	struct vm_area_struct *vmas[VMACACHE_SIZE];
 };


include/linux/vm_event_item.h

Lines changed: 0 additions & 1 deletion

@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
 		VMACACHE_FIND_CALLS,
 		VMACACHE_FIND_HITS,
-		VMACACHE_FULL_FLUSHES,
 #endif
 #ifdef CONFIG_SWAP
 		SWAP_RA,

include/linux/vmacache.h

Lines changed: 0 additions & 5 deletions

@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
 	memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 }

-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
 						unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
 	mm->vmacache_seqnum++;
-
-	/* deal with overflows */
-	if (unlikely(mm->vmacache_seqnum == 0))
-		vmacache_flush_all(mm);
 }

 #endif /* __LINUX_VMACACHE_H */

mm/debug.c

Lines changed: 2 additions & 2 deletions

@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);

 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %px\n"
 #endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
 		"tlb_flush_pending %d\n"
 		"def_flags: %#lx(%pGv)\n",

-		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif

mm/vmacache.c

Lines changed: 0 additions & 38 deletions

@@ -19,44 +19,6 @@
 #endif
 #define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)

-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-	struct task_struct *g, *p;
-
-	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-	/*
-	 * Single threaded tasks need not iterate the entire
-	 * list of process.  We can avoid the flushing as well
-	 * since the mm's seqnum was increased and don't have
-	 * to worry about other threads' seqnum.  Current's
-	 * flush will occur upon the next lookup.
-	 */
-	if (atomic_read(&mm->mm_users) == 1)
-		return;
-
-	rcu_read_lock();
-	for_each_process_thread(g, p) {
-		/*
-		 * Only flush the vmacache pointers as the
-		 * mm seqnum is already set and curr's will
-		 * be set upon invalidation when the next
-		 * lookup is done.
-		 */
-		if (mm == p->mm)
-			vmacache_flush(p);
-	}
-	rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this