@@ -23,8 +23,6 @@ struct mmu_gather {
 	unsigned long		start, end;
 };
 
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void init_tlb_gather(struct mmu_gather *tlb)
 {
 	tlb->start = TASK_SIZE;
@@ -36,17 +34,13 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 static inline void
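
The signature change above moves the mmu_gather off the per-CPU mmu_gathers variable (whose declaration the first hunk drops) and onto the caller's stack. A minimal sketch of the caller-side change, assuming the usual tlb_gather_mmu()/tlb_finish_mmu() pairing from the generic mm code; the caller code is not part of this diff:

	/* before: borrow the per-CPU gather (get_cpu_var disables preemption) */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 1);
	/* ... unmap pages under tlb ... */
	tlb_finish_mmu(tlb, start, end);

	/* after: the gather is an ordinary stack variable owned by the caller */
	struct mmu_gather tlb;
	tlb_gather_mmu(&tlb, mm, 1);
	/* ... unmap pages under &tlb ... */
	tlb_finish_mmu(&tlb, start, end);
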
@@ -57,8 +51,6 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 static inline void
@@ -91,7 +83,21 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
-#define tlb_remove_page(tlb, page)	free_page_and_swap_cache(page)
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+}
+
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	free_page_and_swap_cache(page);
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
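
The one-line tlb_remove_page() macro becomes the __tlb_remove_page()/tlb_remove_page() pair so this header matches the reworked interface: __tlb_remove_page() returns nonzero while the gather can accept more pages, and callers flush when it returns 0. Because this header frees each page immediately, returning 1 means the flush path is never taken. A sketch of the pattern being mirrored, assuming the asm-generic form from the same rework rather than anything in this diff:

	static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
	{
		if (!__tlb_remove_page(tlb, page))	/* 0: the batch is full */
			tlb_flush_mmu(tlb);		/* flush, freeing room to batch more */
	}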