Skip to content

Commit 0318e5a

Browse files
kiryl authored and Ingo Molnar committed
x86/mm/gup: Add 5-level paging support
Extend get_user_pages_fast() to handle an additional page table level. Signed-off-by: Kirill A. Shutemov <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Arnd Bergmann <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Brian Gerst <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Denys Vlasenko <[email protected]> Cc: H. Peter Anvin <[email protected]> Cc: Josh Poimboeuf <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent e0c4f67 commit 0318e5a

File tree

1 file changed

+27
-6
lines changed

1 file changed

+27
-6
lines changed

arch/x86/mm/gup.c

Lines changed: 27 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -76,9 +76,9 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
7676
}
7777

7878
/*
79-
* 'pteval' can come from a pte, pmd or pud. We only check
79+
* 'pteval' can come from a pte, pmd, pud or p4d. We only check
8080
* _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
81-
* same value on all 3 types.
81+
* same value on all 4 types.
8282
*/
8383
static inline int pte_allows_gup(unsigned long pteval, int write)
8484
{
@@ -295,13 +295,13 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
295295
return 1;
296296
}
297297

298-
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
298+
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
299299
int write, struct page **pages, int *nr)
300300
{
301301
unsigned long next;
302302
pud_t *pudp;
303303

304-
pudp = pud_offset(&pgd, addr);
304+
pudp = pud_offset(&p4d, addr);
305305
do {
306306
pud_t pud = *pudp;
307307

@@ -320,6 +320,27 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
320320
return 1;
321321
}
322322

/*
 * Walk the p4d (4th-level page table) entries covering [addr, end) for
 * the lockless get_user_pages_fast() path, descending into
 * gup_pud_range() for every present entry.
 *
 * Returns 1 if the entire range was pinned, 0 to make the caller fall
 * back to the slow GUP path.
 */
static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		/* Read the entry once so the checks below see one value. */
		p4d_t p4d = *p4dp;

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		/*
		 * x86 has no huge pages at the p4d level: p4d_large() must
		 * be compile-time false, enforced here at build time.
		 */
		BUILD_BUG_ON(p4d_large(p4d));
		if (!gup_pud_range(p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}
323344
/*
324345
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
325346
* back to the regular GUP.
@@ -368,7 +389,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
368389
next = pgd_addr_end(addr, end);
369390
if (pgd_none(pgd))
370391
break;
371-
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
392+
if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
372393
break;
373394
} while (pgdp++, addr = next, addr != end);
374395
local_irq_restore(flags);
@@ -440,7 +461,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
440461
next = pgd_addr_end(addr, end);
441462
if (pgd_none(pgd))
442463
goto slow;
443-
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
464+
if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
444465
goto slow;
445466
} while (pgdp++, addr = next, addr != end);
446467
local_irq_enable();

0 commit comments

Comments (0)