
Commit ff075d6

Peter Zijlstra authored and torvalds committed
um: mmu_gather rework
Fix up the um mmu_gather code to conform to the new API.

Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Jeff Dike <[email protected]>
Cc: Richard Weinberger <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: David Miller <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Russell King <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Namhyung Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
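Under the old API, tlb_gather_mmu() handed back a pointer into the per-CPU mmu_gathers storage; after this rework the caller owns the struct mmu_gather and passes it in, and tlb_flush_mmu() no longer takes a start/end range. A minimal sketch of the new calling convention, in the spirit of the diff below; the function name and the page-walk placeholder are hypothetical, not actual mm code:

	/* Hypothetical caller, illustrating the reworked convention only. */
	static void sketch_unmap(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;		/* caller-owned, typically on-stack */

		tlb_gather_mmu(&tlb, mm, 0);	/* was: tlb = tlb_gather_mmu(mm, 0) */
		/* ... clear ptes, calling tlb_remove_page(&tlb, page) ... */
		tlb_finish_mmu(&tlb, start, end);
	}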
1 parent 7a95a2c commit ff075d6


arch/um/include/asm/tlb.h

Lines changed: 11 additions & 18 deletions
@@ -22,9 +22,6 @@ struct mmu_gather {
 	unsigned int	fullmm; /* non-zero means full mm flush */
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
@@ -83,24 +73,27 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 /* tlb_remove_page
  *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
  *	while handling the additional races in SMP caused by other CPUs
  *	caching valid mappings in their TLBs.
  */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return;
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
 }
 
 /**
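The int return of __tlb_remove_page() is the batching hook of the reworked API: a zero return tells the caller the gather cannot accept further pages and tlb_flush_mmu() must be called before queuing more. UML frees each page immediately via free_page_and_swap_cache(), so it always returns 1 and never forces a flush, which is what the new tlb_remove_page() wrapper relies on. A hedged sketch of the caller-side pattern, assuming the generic semantics of the rework series; the loop and names are illustrative, not the actual mm code:

	/*
	 * Sketch only: assumes __tlb_remove_page() queues (or, as on UML,
	 * immediately frees) the page and returns 0 once the gather is full.
	 */
	static void sketch_remove_pages(struct mmu_gather *tlb,
					struct page **pages, int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (!__tlb_remove_page(tlb, pages[i]))
				tlb_flush_mmu(tlb);	/* drain before queuing more */
		}
	}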
