
Commit 597e1c3

Alex Shi authored and H. Peter Anvin committed
mm/mmu_gather: enable tlb flush range in generic mmu_gather
This patch enables TLB flush range support in the generic mmu_gather layer.

Most architectures have their own TLB flush range support, e.g. ARM and IA64. X86 has no such hardware support yet, but the 'invlpg' instruction can implement it to some degree. So enable this feature in the generic layer for x86 now; it may be useful for other architectures later.

The generic mmu_gather struct is guarded by the HAVE_GENERIC_MMU_GATHER macro. Architectures that already support flush ranges use their own mmu_gather structs, so this change is safe for them. In the future we may unify this struct and the related functions across architectures.

Thanks to Peter Zijlstra for the repeated reminders about keeping the code safe across multiple architectures!

Signed-off-by: Alex Shi <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: H. Peter Anvin <[email protected]>
1 parent 3df3212 commit 597e1c3
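
The message above says x86 can approximate range flushing with 'invlpg' once the generic mmu_gather records a range. As a rough illustration only, and not code from this commit or the x86 tree, an architecture's tlb_flush() hook could consume the new fields along these lines (flush_tlb_mm() is the existing whole-mm flush; flush_tlb_mm_range() is a stand-in name for an arch-specific range-flush primitive):

/*
 * Illustrative sketch, not part of this commit: an arch tlb_flush()
 * hook consuming the new tlb->start/tlb->end fields.
 * flush_tlb_mm_range() is a stand-in for whatever range-flush
 * primitive the architecture provides (on x86 it would boil down
 * to one invlpg per page in the range).
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->start > tlb->end) {
		/* Tearing down the whole mm, or no usable range recorded. */
		flush_tlb_mm(tlb->mm);
	} else {
		/* Only the gathered [start, end) range needs flushing. */
		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	}
}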

File tree

include/asm-generic/tlb.h
mm/memory.c

2 files changed: +11 −0

include/asm-generic/tlb.h

Lines changed: 2 additions & 0 deletions
@@ -86,6 +86,8 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
+	unsigned long		start;
+	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
 				fast_mode  : 1; /* No batching   */

mm/memory.c

Lines changed: 9 additions & 0 deletions
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;

 	tlb->fullmm     = fullmm;
+	tlb->start	= -1UL;
+	tlb->end	= 0;
 	tlb->need_flush = 0;
 	tlb->fast_mode  = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	struct mmu_gather_batch *batch, *next;

+	tlb->start = start;
+	tlb->end   = end;
 	tlb_flush_mmu(tlb);

 	/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 */
 	if (force_flush) {
 		force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+		tlb->start = addr;
+		tlb->end = end;
+#endif
 		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
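
A note on the initial values in tlb_gather_mmu(): start = -1UL and end = 0 describe an empty range, so code that later records unmapped pages can simply widen it with min/max-style updates, and the first page recorded replaces both bounds. A small helper along these lines, shown only as an illustration and not present in this commit, makes the pattern explicit:

/*
 * Illustration only, not part of this commit: widen the recorded
 * flush range as pages are unmapped.  Because tlb_gather_mmu()
 * starts with start = -1UL and end = 0, the first call simply
 * adopts the page's bounds.
 */
static inline void tlb_track_range(struct mmu_gather *tlb,
				   unsigned long addr, unsigned long size)
{
	if (addr < tlb->start)
		tlb->start = addr;
	if (addr + size > tlb->end)
		tlb->end = addr + size;
}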
