#include <asm/machvec.h>

#ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
-# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif

+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define IA64_GATHER_BUNDLE	8
+
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
-	struct page		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
};

struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)

-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that where gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
	}
}

-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
@@ -172,15 +183,14 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
-	return tlb;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
-tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,26 +201,42 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
	/* keep the page table cache within bounds */
	check_pgt_cache();

-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
-static inline void
-tlb_remove_page (struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
-		return;
+		return 1; /* avoid calling tlb_flush_mmu */
	}
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+
	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+	VM_BUG_ON(tlb->nr > tlb->max);
+
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
}

/*
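For context, here is a minimal sketch of how a caller might drive the reworked interface above: the mmu_gather now lives on the caller's stack rather than in the removed per-CPU mmu_gathers storage, and tlb_remove_page() batches pages until tlb->max is reached. The wrapper function, the page array, and the loop below are illustrative assumptions; only the tlb_gather_mmu/tlb_remove_page/tlb_finish_mmu calls come from the patch itself.

/* Illustrative sketch only -- the caller, its page array and loop are assumptions. */
static void example_shootdown(struct mm_struct *mm, struct page **pages, int npages,
			      unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* on-stack, no per-CPU storage needed */
	int i;

	tlb_gather_mmu(&tlb, mm, 0);		/* 0 => not a full-mm flush */

	for (i = 0; i < npages; i++)
		tlb_remove_page(&tlb, pages[i]);	/* flushes internally once the batch fills */

	tlb_finish_mmu(&tlb, start, end);	/* final flush; frees the batch page if one was allocated */
}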