Skip to content

Commit 6bc56a4

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: add vma_alloc_zeroed_movable_folio()
Replace alloc_zeroed_user_highpage_movable(). The main difference is returning a folio containing a single page instead of returning the page, but take the opportunity to rename the function to match other allocation functions a little better and rewrite the documentation to place more emphasis on the zeroing rather than the highmem aspect.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent c5792d9 commit 6bc56a4

File tree

9 files changed

+44
-47
lines changed

9 files changed

+44
-47
lines changed

arch/alpha/include/asm/page.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,8 @@
1717
extern void clear_page(void *page);
1818
#define clear_user_page(page, vaddr, pg) clear_page(page)
1919

20-
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
21-
alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
22-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
20+
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
21+
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
2322

2423
extern void copy_page(void * _to, void * _from);
2524
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

arch/arm64/include/asm/page.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,9 @@ void copy_user_highpage(struct page *to, struct page *from,
2929
void copy_highpage(struct page *to, struct page *from);
3030
#define __HAVE_ARCH_COPY_HIGHPAGE
3131

32-
struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
32+
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
3333
unsigned long vaddr);
34-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
34+
#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
3535

3636
void tag_clear_highpage(struct page *to);
3737
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

arch/arm64/mm/fault.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -925,7 +925,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
925925
/*
926926
* Used during anonymous page fault handling.
927927
*/
928-
struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
928+
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
929929
unsigned long vaddr)
930930
{
931931
gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
@@ -938,7 +938,7 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
938938
if (vma->vm_flags & VM_MTE)
939939
flags |= __GFP_ZEROTAGS;
940940

941-
return alloc_page_vma(flags, vma, vaddr);
941+
return vma_alloc_folio(flags, 0, vma, vaddr, false);
942942
}
943943

944944
void tag_clear_highpage(struct page *page)

arch/ia64/include/asm/page.h

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -82,17 +82,15 @@ do { \
8282
} while (0)
8383

8484

85-
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
85+
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
8686
({ \
87-
struct page *page = alloc_page_vma( \
88-
GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
89-
if (page) \
90-
flush_dcache_page(page); \
91-
page; \
87+
struct folio *folio = vma_alloc_folio( \
88+
GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
89+
if (folio) \
90+
flush_dcache_folio(folio); \
91+
folio; \
9292
})
9393

94-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
95-
9694
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
9795

9896
#include <asm-generic/memory_model.h>

arch/m68k/include/asm/page_no.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,8 @@ extern unsigned long memory_end;
1313
#define clear_user_page(page, vaddr, pg) clear_page(page)
1414
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
1515

16-
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
17-
alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
18-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
16+
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
17+
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
1918

2019
#define __pa(vaddr) ((unsigned long)(vaddr))
2120
#define __va(paddr) ((void *)((unsigned long)(paddr)))

arch/s390/include/asm/page.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,9 +73,8 @@ static inline void copy_page(void *to, void *from)
7373
#define clear_user_page(page, vaddr, pg) clear_page(page)
7474
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
7575

76-
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
77-
alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
78-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
76+
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
77+
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
7978

8079
/*
8180
* These are used to make use of C type-checking..

arch/x86/include/asm/page.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
3434
copy_page(to, from);
3535
}
3636

37-
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
38-
alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
39-
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
37+
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
38+
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
4039

4140
#ifndef __pa
4241
#define __pa(x) __phys_addr((unsigned long)(x))

include/linux/highmem.h

Lines changed: 16 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -207,31 +207,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
207207
}
208208
#endif
209209

210-
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
210+
#ifndef vma_alloc_zeroed_movable_folio
211211
/**
212-
* alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
213-
* @vma: The VMA the page is to be allocated for
214-
* @vaddr: The virtual address the page will be inserted into
212+
* vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
213+
* @vma: The VMA the page is to be allocated for.
214+
* @vaddr: The virtual address the page will be inserted into.
215215
*
216-
* Returns: The allocated and zeroed HIGHMEM page
216+
* This function will allocate a page suitable for inserting into this
217+
* VMA at this virtual address. It may be allocated from highmem or
218+
* the movable zone. An architecture may provide its own implementation.
217219
*
218-
* This function will allocate a page for a VMA that the caller knows will
219-
* be able to migrate in the future using move_pages() or reclaimed
220-
*
221-
* An architecture may override this function by defining
222-
* __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
223-
* implementation.
220+
* Return: A folio containing one allocated and zeroed page or NULL if
221+
* we are out of memory.
224222
*/
225-
static inline struct page *
226-
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
223+
static inline
224+
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
227225
unsigned long vaddr)
228226
{
229-
struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
227+
struct folio *folio;
230228

231-
if (page)
232-
clear_user_highpage(page, vaddr);
229+
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
230+
if (folio)
231+
clear_user_highpage(&folio->page, vaddr);
233232

234-
return page;
233+
return folio;
235234
}
236235
#endif
237236

mm/memory.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3056,10 +3056,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
30563056
goto oom;
30573057

30583058
if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3059-
new_page = alloc_zeroed_user_highpage_movable(vma,
3060-
vmf->address);
3061-
if (!new_page)
3059+
struct folio *new_folio;
3060+
3061+
new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
3062+
if (!new_folio)
30623063
goto oom;
3064+
new_page = &new_folio->page;
30633065
} else {
30643066
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
30653067
vmf->address);
@@ -3995,6 +3997,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
39953997
{
39963998
struct vm_area_struct *vma = vmf->vma;
39973999
struct page *page;
4000+
struct folio *folio;
39984001
vm_fault_t ret = 0;
39994002
pte_t entry;
40004003

@@ -4044,11 +4047,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
40444047
/* Allocate our own private page. */
40454048
if (unlikely(anon_vma_prepare(vma)))
40464049
goto oom;
4047-
page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
4048-
if (!page)
4050+
folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4051+
if (!folio)
40494052
goto oom;
40504053

4051-
if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
4054+
page = &folio->page;
4055+
if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
40524056
goto oom_free_page;
40534057
cgroup_throttle_swaprate(page, GFP_KERNEL);
40544058

0 commit comments

Comments
 (0)