
Commit 7a95a2c

Peter Zijlstra authored and torvalds committed
ia64: mmu_gather rework
Fix up the ia64 mmu_gather code to conform to the new API.

Signed-off-by: Peter Zijlstra <[email protected]>
Acked-by: Tony Luck <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: David Miller <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Russell King <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: Jeff Dike <[email protected]>
Cc: Richard Weinberger <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Namhyung Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 1e56a56 commit 7a95a2c
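
For readers following the API change: under the new mmu_gather interface this commit adopts, the caller supplies the gather structure itself (typically on its stack) rather than borrowing a per-CPU one, so tlb_gather_mmu() now takes a struct mmu_gather * and returns void, and the per-CPU mmu_gathers declaration goes away. A minimal caller-side sketch, using only the functions visible in the diff below; the wrapper unmap_range_example() and its parameters are hypothetical:

/* Hypothetical caller; only the calling convention is the point here. */
static void unmap_range_example(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* caller-owned, not per-CPU */

	tlb_gather_mmu(&tlb, mm, 0);		/* old API: tlb = tlb_gather_mmu(mm, 0); */
	/* ... unmap PTEs, handing each freed page to tlb_remove_page(&tlb, page) ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush and release any gathered pages */
}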

File tree

  • arch/ia64/include/asm

1 file changed: +46 −20 lines changed

arch/ia64/include/asm/tlb.h

Lines changed: 46 additions & 20 deletions
@@ -47,21 +47,27 @@
 #include <asm/machvec.h>
 
 #ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
 # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 #else
-# define FREE_PTE_NR		0
 # define tlb_fast_mode(tlb)	(1)
 #endif
 
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define IA64_GATHER_BUNDLE	8
+
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
-	struct page		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
 };
 
 struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
  * freed pages that where gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 	}
 }
 
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	/*
 	 * Use fast mode if only 1 CPU is online.
 	 *
@@ -172,15 +183,14 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
-	return tlb;
 }
 
 /*
  * Called at the end of the shootdown operation to free up any resources that were
  * collected.
  */
 static inline void
-tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,26 +201,42 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
  * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
  * this file).
  */
-static inline void
-tlb_remove_page (struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
-		return;
+		return 1; /* avoid calling tlb_flush_mmu */
 	}
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+
 	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+	VM_BUG_ON(tlb->nr > tlb->max);
+
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 /*
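
As a usage note on the batching contract shown above: __tlb_remove_page() queues the page and returns the remaining batch capacity (zero when full), so callers flush via tlb_flush_mmu() when it returns 0, exactly as the new tlb_remove_page() wrapper does. A small illustrative loop, assuming a hypothetical caller that already holds an initialized gather and an array of pages to free:

/* Illustrative only: pages[] and npages are invented for the example;
 * the __tlb_remove_page()/tlb_flush_mmu() contract is what the diff defines.
 */
static void gather_and_free_example(struct mmu_gather *tlb,
				    struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		/* returns remaining batch capacity; 0 means the batch is full */
		if (!__tlb_remove_page(tlb, pages[i]))
			tlb_flush_mmu(tlb);	/* flush TLB and release the gathered pages */
	}
}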
