Skip to content

Commit a90744b

Browse files
npiggin authored and torvalds committed
mm: allow arch to supply p??_free_tlb functions
The mmu_gather APIs keep track of the invalidated address range, including the span covered by invalidated page table pages.  Ranges covered by page tables but not ptes (and therefore no TLBs) still need to be invalidated because some architectures (x86) can cache intermediate page table entries, and invalidate those with normal TLB invalidation instructions in order to be almost-backward-compatible.

Architectures which don't cache intermediate page table entries, or which invalidate these caches separately from TLB invalidation, do not require the TLB invalidation range to be expanded over page tables.

Allow architectures to supply their own p??_free_tlb functions, which can avoid the __tlb_adjust_range.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Nicholas Piggin <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Cc: "Aneesh Kumar K. V" <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Nadav Amit <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 02f51d4 commit a90744b

File tree

1 file changed

+8
-0
lines changed

1 file changed

+8
-0
lines changed

include/asm-generic/tlb.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
265265
* For now w.r.t page table cache, mark the range_size as PAGE_SIZE
266266
*/
267267

268+
/*
 * Free a PTE page-table page via the mmu_gather batch.
 *
 * Guarded so an architecture can supply its own pte_free_tlb and skip
 * the __tlb_adjust_range() call when it does not cache intermediate
 * page-table entries (see the commit message: only arches like x86,
 * which cache them, need the flush range widened over the table page).
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
273275

276+
/*
 * Free a PMD page-table page via the mmu_gather batch.
 *
 * Overridable (#ifndef guard) so architectures that do not cache
 * intermediate page-table entries can avoid expanding the TLB flush
 * range with __tlb_adjust_range().
 */
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif
279283

280284
/*
 * Free a PUD page-table page via the mmu_gather batch.
 *
 * Only defined when the architecture has a real 4th level (i.e. not
 * using __ARCH_HAS_4LEVEL_HACK).  Overridable (#ifndef guard) so
 * architectures can skip the __tlb_adjust_range() range expansion.
 */
#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
287293

288294
/*
 * Free a P4D page-table page via the mmu_gather batch.
 *
 * Only defined when the architecture has a real 5th level (i.e. not
 * using __ARCH_HAS_5LEVEL_HACK).  Overridable (#ifndef guard) so
 * architectures can skip the __tlb_adjust_range() range expansion.
 *
 * NOTE(review): the parameter is named "pudp" in the original source
 * even though it carries a p4d pointer; kept as-is for fidelity.
 */
#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
295303

296304
/* No-op hook: generic code has nothing to do when a task finishes migrating. */
#define tlb_migrate_finish(mm) do {} while (0)
297305

0 commit comments

Comments
 (0)