@@ -22,9 +22,6 @@ struct mmu_gather {
 	unsigned int		fullmm; /* non-zero means full mm flush */
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
@@ -83,24 +73,27 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 /* tlb_remove_page
  *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
  *	while handling the additional races in SMP caused by other CPUs
  *	caching valid mappings in their TLBs.
  */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return;
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
 }
 
 /**
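
For context, a minimal caller-side sketch of the reworked interface, assuming the usual unmap path: the mmu_gather is now supplied by the caller (typically on its stack) instead of being fetched from the removed per-CPU mmu_gathers variable, so the get_cpu_var()/put_cpu_var() pairing is gone. The unmap_one() helper and its parameters are hypothetical; only the tlb_* calls and their signatures come from the diff above.

/*
 * Hypothetical caller, not part of the patch: shows the call sequence
 * implied by the new signatures in this header.
 */
static void unmap_one(struct mm_struct *mm, unsigned long start,
		      unsigned long end, struct page *page)
{
	struct mmu_gather tlb;			/* caller-provided, on the stack */

	tlb_gather_mmu(&tlb, mm, 0);		/* 0: not a full-mm teardown */

	/* ... the pte mapping the page is cleared here ... */
	tlb_remove_page(&tlb, page);		/* sets need_flush, frees the page */

	tlb_finish_mmu(&tlb, start, end);	/* flushes if needed, trims pgt cache */
}

Batched callers can use __tlb_remove_page() directly; in this header it always returns 1, signalling that an intermediate tlb_flush_mmu() call is not required.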