Skip to content

Commit 7964cf8

Browse files
howlett authored and akpm00 committed
mm: remove vmacache
By using the maple tree and the maple tree state, the vmacache is no longer beneficial and is complicating the VMA code. Remove the vmacache to reduce the work in keeping it up to date and code complexity. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Liam R. Howlett <[email protected]> Acked-by: Vlastimil Babka <[email protected]> Tested-by: Yu Zhao <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: David Howells <[email protected]> Cc: Davidlohr Bueso <[email protected]> Cc: "Matthew Wilcox (Oracle)" <[email protected]> Cc: SeongJae Park <[email protected]> Cc: Sven Schnelle <[email protected]> Cc: Will Deacon <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 4dd1b84 commit 7964cf8

File tree

17 files changed

+9
-267
lines changed

17 files changed

+9
-267
lines changed

fs/exec.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,6 @@
2828
#include <linux/file.h>
2929
#include <linux/fdtable.h>
3030
#include <linux/mm.h>
31-
#include <linux/vmacache.h>
3231
#include <linux/stat.h>
3332
#include <linux/fcntl.h>
3433
#include <linux/swap.h>
@@ -1027,8 +1026,6 @@ static int exec_mmap(struct mm_struct *mm)
10271026
activate_mm(active_mm, mm);
10281027
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
10291028
local_irq_enable();
1030-
tsk->mm->vmacache_seqnum = 0;
1031-
vmacache_flush(tsk);
10321029
task_unlock(tsk);
10331030
lru_gen_use_mm(mm);
10341031

fs/proc/task_mmu.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
// SPDX-License-Identifier: GPL-2.0
22
#include <linux/pagewalk.h>
3-
#include <linux/vmacache.h>
43
#include <linux/mm_inline.h>
54
#include <linux/hugetlb.h>
65
#include <linux/huge_mm.h>

include/linux/mm_types.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -475,7 +475,6 @@ struct mm_struct {
475475
struct {
476476
struct vm_area_struct *mmap; /* list of VMAs */
477477
struct maple_tree mm_mt;
478-
u64 vmacache_seqnum; /* per-thread vmacache */
479478
#ifdef CONFIG_MMU
480479
unsigned long (*get_unmapped_area) (struct file *filp,
481480
unsigned long addr, unsigned long len,

include/linux/mm_types_task.h

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -24,18 +24,6 @@
2424
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
2525
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
2626

27-
/*
28-
* The per task VMA cache array:
29-
*/
30-
#define VMACACHE_BITS 2
31-
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
32-
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
33-
34-
struct vmacache {
35-
u64 seqnum;
36-
struct vm_area_struct *vmas[VMACACHE_SIZE];
37-
};
38-
3927
/*
4028
* When updating this, please also update struct resident_page_types[] in
4129
* kernel/fork.c

include/linux/sched.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -861,7 +861,6 @@ struct task_struct {
861861
struct mm_struct *active_mm;
862862

863863
/* Per-thread vma caching: */
864-
struct vmacache vmacache;
865864

866865
#ifdef SPLIT_RSS_COUNTING
867866
struct task_rss_stat rss_stat;

include/linux/vm_event_item.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -129,10 +129,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
129129
NR_TLB_LOCAL_FLUSH_ALL,
130130
NR_TLB_LOCAL_FLUSH_ONE,
131131
#endif /* CONFIG_DEBUG_TLBFLUSH */
132-
#ifdef CONFIG_DEBUG_VM_VMACACHE
133-
VMACACHE_FIND_CALLS,
134-
VMACACHE_FIND_HITS,
135-
#endif
136132
#ifdef CONFIG_SWAP
137133
SWAP_RA,
138134
SWAP_RA_HIT,

include/linux/vmacache.h

Lines changed: 0 additions & 28 deletions
This file was deleted.

include/linux/vmstat.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -125,12 +125,6 @@ static inline void vm_events_fold_cpu(int cpu)
125125
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
126126
#endif
127127

128-
#ifdef CONFIG_DEBUG_VM_VMACACHE
129-
#define count_vm_vmacache_event(x) count_vm_event(x)
130-
#else
131-
#define count_vm_vmacache_event(x) do {} while (0)
132-
#endif
133-
134128
#define __count_zid_vm_events(item, zid, delta) \
135129
__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
136130

kernel/debug/debug_core.c

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,6 @@
5050
#include <linux/pid.h>
5151
#include <linux/smp.h>
5252
#include <linux/mm.h>
53-
#include <linux/vmacache.h>
5453
#include <linux/rcupdate.h>
5554
#include <linux/irq.h>
5655
#include <linux/security.h>
@@ -283,17 +282,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
283282
if (!CACHE_FLUSH_IS_SAFE)
284283
return;
285284

286-
if (current->mm) {
287-
int i;
288-
289-
for (i = 0; i < VMACACHE_SIZE; i++) {
290-
if (!current->vmacache.vmas[i])
291-
continue;
292-
flush_cache_range(current->vmacache.vmas[i],
293-
addr, addr + BREAK_INSTR_SIZE);
294-
}
295-
}
296-
297285
/* Force flush instruction cache if it was outside the mm */
298286
flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
299287
}

kernel/fork.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,6 @@
4343
#include <linux/fs.h>
4444
#include <linux/mm.h>
4545
#include <linux/mm_inline.h>
46-
#include <linux/vmacache.h>
4746
#include <linux/nsproxy.h>
4847
#include <linux/capability.h>
4948
#include <linux/cpu.h>
@@ -1128,7 +1127,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
11281127
mm->mmap = NULL;
11291128
mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
11301129
mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
1131-
mm->vmacache_seqnum = 0;
11321130
atomic_set(&mm->mm_users, 1);
11331131
atomic_set(&mm->mm_count, 1);
11341132
seqcount_init(&mm->write_protect_seq);
@@ -1585,9 +1583,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
15851583
if (!oldmm)
15861584
return 0;
15871585

1588-
/* initialize the new vmacache entries */
1589-
vmacache_flush(tsk);
1590-
15911586
if (clone_flags & CLONE_VM) {
15921587
mmget(oldmm);
15931588
mm = oldmm;

lib/Kconfig.debug

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -812,14 +812,6 @@ config DEBUG_VM
812812

813813
If unsure, say N.
814814

815-
config DEBUG_VM_VMACACHE
816-
bool "Debug VMA caching"
817-
depends on DEBUG_VM
818-
help
819-
Enable this to turn on VMA caching debug information. Doing so
820-
can cause significant overhead, so only enable it in non-production
821-
environments.
822-
823815
config DEBUG_VM_MAPLE_TREE
824816
bool "Debug VM maple trees"
825817
depends on DEBUG_VM

mm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
5252
readahead.o swap.o truncate.o vmscan.o shmem.o \
5353
util.o mmzone.o vmstat.o backing-dev.o \
5454
mm_init.o percpu.o slab_common.o \
55-
compaction.o vmacache.o \
55+
compaction.o \
5656
interval_tree.o list_lru.o workingset.o \
5757
debug.o gup.o mmap_lock.o $(mmu-y)
5858

mm/debug.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(dump_vma);
155155

156156
void dump_mm(const struct mm_struct *mm)
157157
{
158-
pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
158+
pr_emerg("mm %px mmap %px task_size %lu\n"
159159
#ifdef CONFIG_MMU
160160
"get_unmapped_area %px\n"
161161
#endif
@@ -183,7 +183,7 @@ void dump_mm(const struct mm_struct *mm)
183183
"tlb_flush_pending %d\n"
184184
"def_flags: %#lx(%pGv)\n",
185185

186-
mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
186+
mm, mm->mmap, mm->task_size,
187187
#ifdef CONFIG_MMU
188188
mm->get_unmapped_area,
189189
#endif

mm/mmap.c

Lines changed: 2 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
#include <linux/backing-dev.h>
1515
#include <linux/mm.h>
1616
#include <linux/mm_inline.h>
17-
#include <linux/vmacache.h>
1817
#include <linux/shm.h>
1918
#include <linux/mman.h>
2019
#include <linux/pagemap.h>
@@ -680,9 +679,6 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
680679
/* Remove from mm linked list - also updates highest_vm_end */
681680
__vma_unlink_list(mm, next);
682681

683-
/* Kill the cache */
684-
vmacache_invalidate(mm);
685-
686682
if (file)
687683
__remove_shared_vm_struct(next, file, mapping);
688684

@@ -923,8 +919,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
923919
__vma_unlink_list(mm, next);
924920
if (remove_next == 2)
925921
__vma_unlink_list(mm, next_next);
926-
/* Kill the cache */
927-
vmacache_invalidate(mm);
928922

929923
if (file) {
930924
__remove_shared_vm_struct(next, file, mapping);
@@ -2233,19 +2227,10 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
22332227
unsigned long start_addr,
22342228
unsigned long end_addr)
22352229
{
2236-
struct vm_area_struct *vma;
22372230
unsigned long index = start_addr;
22382231

22392232
mmap_assert_locked(mm);
2240-
/* Check the cache first. */
2241-
vma = vmacache_find(mm, start_addr);
2242-
if (likely(vma))
2243-
return vma;
2244-
2245-
vma = mt_find(&mm->mm_mt, &index, end_addr - 1);
2246-
if (vma)
2247-
vmacache_update(start_addr, vma);
2248-
return vma;
2233+
return mt_find(&mm->mm_mt, &index, end_addr - 1);
22492234
}
22502235
EXPORT_SYMBOL(find_vma_intersection);
22512236

@@ -2259,19 +2244,10 @@ EXPORT_SYMBOL(find_vma_intersection);
22592244
*/
22602245
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
22612246
{
2262-
struct vm_area_struct *vma;
22632247
unsigned long index = addr;
22642248

22652249
mmap_assert_locked(mm);
2266-
/* Check the cache first. */
2267-
vma = vmacache_find(mm, addr);
2268-
if (likely(vma))
2269-
return vma;
2270-
2271-
vma = mt_find(&mm->mm_mt, &index, ULONG_MAX);
2272-
if (vma)
2273-
vmacache_update(addr, vma);
2274-
return vma;
2250+
return mt_find(&mm->mm_mt, &index, ULONG_MAX);
22752251
}
22762252
EXPORT_SYMBOL(find_vma);
22772253

@@ -2660,9 +2636,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas,
26602636
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
26612637
tail_vma->vm_next = NULL;
26622638

2663-
/* Kill the cache */
2664-
vmacache_invalidate(mm);
2665-
26662639
/*
26672640
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
26682641
* VM_GROWSUP VMA. Such VMAs can change their size under

mm/nommu.c

Lines changed: 4 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
#include <linux/export.h>
2020
#include <linux/mm.h>
2121
#include <linux/sched/mm.h>
22-
#include <linux/vmacache.h>
2322
#include <linux/mman.h>
2423
#include <linux/swap.h>
2524
#include <linux/file.h>
@@ -598,23 +597,12 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
598597
*/
599598
static void delete_vma_from_mm(struct vm_area_struct *vma)
600599
{
601-
int i;
602-
struct address_space *mapping;
603-
struct mm_struct *mm = vma->vm_mm;
604-
struct task_struct *curr = current;
605600
MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
606601

607-
mm->map_count--;
608-
for (i = 0; i < VMACACHE_SIZE; i++) {
609-
/* if the vma is cached, invalidate the entire cache */
610-
if (curr->vmacache.vmas[i] == vma) {
611-
vmacache_invalidate(mm);
612-
break;
613-
}
614-
}
615-
602+
vma->vm_mm->map_count--;
616603
/* remove the VMA from the mapping */
617604
if (vma->vm_file) {
605+
struct address_space *mapping;
618606
mapping = vma->vm_file->f_mapping;
619607

620608
i_mmap_lock_write(mapping);
@@ -626,7 +614,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
626614

627615
/* remove from the MM's tree and list */
628616
vma_mas_remove(vma, &mas);
629-
__vma_unlink_list(mm, vma);
617+
__vma_unlink_list(vma->vm_mm, vma);
630618
}
631619

632620
/*
@@ -659,20 +647,9 @@ EXPORT_SYMBOL(find_vma_intersection);
659647
*/
660648
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
661649
{
662-
struct vm_area_struct *vma;
663650
MA_STATE(mas, &mm->mm_mt, addr, addr);
664651

665-
/* check the cache first */
666-
vma = vmacache_find(mm, addr);
667-
if (likely(vma))
668-
return vma;
669-
670-
vma = mas_walk(&mas);
671-
672-
if (vma)
673-
vmacache_update(addr, vma);
674-
675-
return vma;
652+
return mas_walk(&mas);
676653
}
677654
EXPORT_SYMBOL(find_vma);
678655

@@ -706,11 +683,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
706683
unsigned long end = addr + len;
707684
MA_STATE(mas, &mm->mm_mt, addr, addr);
708685

709-
/* check the cache first */
710-
vma = vmacache_find_exact(mm, addr, end);
711-
if (vma)
712-
return vma;
713-
714686
vma = mas_walk(&mas);
715687
if (!vma)
716688
return NULL;
@@ -719,7 +691,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
719691
if (vma->vm_end != end)
720692
return NULL;
721693

722-
vmacache_update(addr, vma);
723694
return vma;
724695
}
725696

0 commit comments

Comments (0)