Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit 240aade

Browse files
walken-google authored and torvalds committed
mm: accelerate mm_populate() treatment of THP pages
This change adds a follow_page_mask function which is equivalent to follow_page, but with an extra page_mask argument. follow_page_mask sets *page_mask to HPAGE_PMD_NR - 1 when it encounters a THP page, and to 0 in other cases. __get_user_pages() makes use of this in order to accelerate populating THP ranges - that is, when both the pages and vmas arrays are NULL, we don't need to iterate HPAGE_PMD_NR times to cover a single THP page (and we also avoid taking mm->page_table_lock that many times). Signed-off-by: Michel Lespinasse <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 28a3571 commit 240aade

File tree

3 files changed

+38
-12
lines changed

3 files changed

+38
-12
lines changed

include/linux/mm.h

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1629,8 +1629,17 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
16291629
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
16301630
unsigned long pfn);
16311631

1632-
struct page *follow_page(struct vm_area_struct *, unsigned long address,
1633-
unsigned int foll_flags);
1632+
struct page *follow_page_mask(struct vm_area_struct *vma,
1633+
unsigned long address, unsigned int foll_flags,
1634+
unsigned int *page_mask);
1635+
1636+
static inline struct page *follow_page(struct vm_area_struct *vma,
1637+
unsigned long address, unsigned int foll_flags)
1638+
{
1639+
unsigned int unused_page_mask;
1640+
return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
1641+
}
1642+
16341643
#define FOLL_WRITE 0x01 /* check pte is writable */
16351644
#define FOLL_TOUCH 0x02 /* mark page accessed */
16361645
#define FOLL_GET 0x04 /* do get_page on page */

mm/memory.c

Lines changed: 23 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1462,19 +1462,21 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
14621462
EXPORT_SYMBOL_GPL(zap_vma_ptes);
14631463

14641464
/**
1465-
* follow_page - look up a page descriptor from a user-virtual address
1465+
* follow_page_mask - look up a page descriptor from a user-virtual address
14661466
* @vma: vm_area_struct mapping @address
14671467
* @address: virtual address to look up
14681468
* @flags: flags modifying lookup behaviour
1469+
* @page_mask: on output, *page_mask is set according to the size of the page
14691470
*
14701471
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
14711472
*
14721473
* Returns the mapped (struct page *), %NULL if no mapping exists, or
14731474
* an error pointer if there is a mapping to something not represented
14741475
* by a page descriptor (see also vm_normal_page()).
14751476
*/
1476-
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1477-
unsigned int flags)
1477+
struct page *follow_page_mask(struct vm_area_struct *vma,
1478+
unsigned long address, unsigned int flags,
1479+
unsigned int *page_mask)
14781480
{
14791481
pgd_t *pgd;
14801482
pud_t *pud;
@@ -1484,6 +1486,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
14841486
struct page *page;
14851487
struct mm_struct *mm = vma->vm_mm;
14861488

1489+
*page_mask = 0;
1490+
14871491
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
14881492
if (!IS_ERR(page)) {
14891493
BUG_ON(flags & FOLL_GET);
@@ -1530,6 +1534,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
15301534
page = follow_trans_huge_pmd(vma, address,
15311535
pmd, flags);
15321536
spin_unlock(&mm->page_table_lock);
1537+
*page_mask = HPAGE_PMD_NR - 1;
15331538
goto out;
15341539
}
15351540
} else
@@ -1684,6 +1689,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
16841689
{
16851690
long i;
16861691
unsigned long vm_flags;
1692+
unsigned int page_mask;
16871693

16881694
if (!nr_pages)
16891695
return 0;
@@ -1761,6 +1767,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
17611767
get_page(page);
17621768
}
17631769
pte_unmap(pte);
1770+
page_mask = 0;
17641771
goto next_page;
17651772
}
17661773

@@ -1778,6 +1785,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
17781785
do {
17791786
struct page *page;
17801787
unsigned int foll_flags = gup_flags;
1788+
unsigned int page_increm;
17811789

17821790
/*
17831791
* If we have a pending SIGKILL, don't keep faulting
@@ -1787,7 +1795,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
17871795
return i ? i : -ERESTARTSYS;
17881796

17891797
cond_resched();
1790-
while (!(page = follow_page(vma, start, foll_flags))) {
1798+
while (!(page = follow_page_mask(vma, start,
1799+
foll_flags, &page_mask))) {
17911800
int ret;
17921801
unsigned int fault_flags = 0;
17931802

@@ -1861,13 +1870,19 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
18611870

18621871
flush_anon_page(vma, page, start);
18631872
flush_dcache_page(page);
1873+
page_mask = 0;
18641874
}
18651875
next_page:
1866-
if (vmas)
1876+
if (vmas) {
18671877
vmas[i] = vma;
1868-
i++;
1869-
start += PAGE_SIZE;
1870-
nr_pages--;
1878+
page_mask = 0;
1879+
}
1880+
page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
1881+
if (page_increm > nr_pages)
1882+
page_increm = nr_pages;
1883+
i += page_increm;
1884+
start += page_increm * PAGE_SIZE;
1885+
nr_pages -= page_increm;
18711886
} while (nr_pages && start < vma->vm_end);
18721887
} while (nr_pages);
18731888
return i;

mm/nommu.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1819,9 +1819,11 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
18191819
return ret;
18201820
}
18211821

1822-
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1823-
unsigned int foll_flags)
1822+
struct page *follow_page_mask(struct vm_area_struct *vma,
1823+
unsigned long address, unsigned int flags,
1824+
unsigned int *page_mask)
18241825
{
1826+
*page_mask = 0;
18251827
return NULL;
18261828
}
18271829

0 commit comments

Comments (0)