
Commit ed48e87

rpedgeco authored and akpm00 committed
thp: add thp_get_unmapped_area_vmflags()
When memory is being placed, mmap() will take care to respect the guard gaps of certain types of memory (VM_SHADOWSTACK, VM_GROWSUP and VM_GROWSDOWN). In order to ensure guard gaps between mappings, mmap() needs to consider two things:

1. That the new mapping isn't placed in any existing mapping's guard gaps.
2. That the new mapping isn't placed such that any existing mappings end up inside *its* guard gaps.

The longstanding behavior of mmap() is to ensure 1, but not take any care around 2. So for example, if there is a PAGE_SIZE free area, and a PAGE_SIZE mmap() of a type that has a guard gap is being placed, mmap() may place the shadow stack in the PAGE_SIZE free area. Then the mapping that is supposed to have a guard gap will not have a gap to the adjacent VMA.

Add a THP implementation of the vm_flags variant of get_unmapped_area(). Future changes will call this from mmap.c in the do_mmap() path to allow shadow stacks to be placed with consideration taken for the start guard gap. Shadow stack memory is always private and anonymous, so special guard gap logic is not needed in a lot of cases, but it can be mapped by THP, so it needs to be handled.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Rick Edgecombe <[email protected]>
Reviewed-by: Christophe Leroy <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Borislav Petkov (AMD) <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Deepak Gupta <[email protected]>
Cc: Guo Ren <[email protected]>
Cc: Helge Deller <[email protected]>
Cc: H. Peter Anvin (Intel) <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: "James E.J. Bottomley" <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Liam R. Howlett <[email protected]>
Cc: Mark Brown <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Naveen N. Rao <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
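To make point 2 concrete, here is a minimal standalone sketch of the failure mode (plain C with illustrative addresses, not kernel code): a one-page hole passes check 1 for a one-page request, yet placing a guard-gapped mapping there leaves it flush against the VMA below it.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* An existing VMA ends at prev_end; the next one starts one page
	 * above it, leaving exactly one free page in between. */
	unsigned long prev_end   = 0x7f0000000000UL;
	unsigned long next_start = prev_end + PAGE_SIZE;

	unsigned long len  = PAGE_SIZE;	/* size of the new mapping */
	unsigned long gap  = PAGE_SIZE;	/* guard gap it should keep below itself */
	unsigned long addr = prev_end;	/* the only placement the hole allows */

	/* Check 1 passes: the hole is big enough for the mapping itself. */
	if (next_start - addr >= len)
		printf("mapping placed at %#lx\n", addr);

	/* Check 2 fails: no free space remains below addr for the gap. */
	if (addr - prev_end < gap)
		printf("only %#lx bytes below it; %#lx-byte guard gap lost\n",
		       addr - prev_end, gap);
	return 0;
}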
1 parent 8a0fe56 commit ed48e87

3 files changed, +34 -12 lines changed

include/linux/huge_mm.h

Lines changed: 11 additions & 0 deletions
@@ -270,6 +270,9 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags,
+		vm_flags_t vm_flags);
 
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
@@ -413,6 +416,14 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 
 #define thp_get_unmapped_area	NULL
 
+static inline unsigned long
+thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff,
+		unsigned long flags, vm_flags_t vm_flags)
+{
+	return 0;
+}
+
 static inline bool
 can_split_folio(struct folio *folio, int *pextra_pins)
 {

mm/huge_memory.c

Lines changed: 16 additions & 7 deletions
@@ -800,7 +800,8 @@ static inline bool is_transparent_hugepage(const struct folio *folio)
 
 static unsigned long __thp_get_unmapped_area(struct file *filp,
 		unsigned long addr, unsigned long len,
-		loff_t off, unsigned long flags, unsigned long size)
+		loff_t off, unsigned long flags, unsigned long size,
+		vm_flags_t vm_flags)
 {
 	loff_t off_end = off + len;
 	loff_t off_align = round_up(off, size);
@@ -816,8 +817,8 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 	if (len_pad < len || (off + len_pad) < off)
 		return 0;
 
-	ret = mm_get_unmapped_area(current->mm, filp, addr, len_pad,
-				   off >> PAGE_SHIFT, flags);
+	ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
+					   off >> PAGE_SHIFT, flags, vm_flags);
 
 	/*
 	 * The failure might be due to length padding. The caller will retry
@@ -842,17 +843,25 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 	return ret;
 }
 
-unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags,
+		vm_flags_t vm_flags)
 {
 	unsigned long ret;
 	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 
-	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
+	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
 	if (ret)
 		return ret;
 
-	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
+	return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
+					    vm_flags);
+}
+
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
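For context, the body of __thp_get_unmapped_area (unchanged by this patch apart from threading vm_flags through) implements the THP-alignment trick: pad the request by one extra PMD_SIZE, then slide the returned address forward until it is congruent to the file offset modulo PMD_SIZE, so the range can be mapped with PMD entries. A standalone sketch of just that congruence step (values illustrative, not kernel code):

#include <stdio.h>

#define SIZE (2UL << 20)	/* PMD_SIZE on x86-64 is 2 MiB; illustrative */

int main(void)
{
	unsigned long off = 0x3000;		/* file offset being mapped */
	unsigned long ret = 0x7f1234501000UL;	/* address returned for the
						 * padded (len + SIZE) request */

	/* Slide forward so (ret + off_sub) and off agree modulo SIZE; the
	 * extra SIZE bytes of padding keep the shifted range in bounds. */
	unsigned long off_sub = (off - ret) & (SIZE - 1);

	printf("aligned address %#lx, congruent to off: %d\n",
	       ret + off_sub, (ret + off_sub) % SIZE == off % SIZE);
	return 0;
}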

mm/mmap.c

Lines changed: 7 additions & 5 deletions
@@ -1860,20 +1860,22 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		 * so use shmem's get_unmapped_area in case it can be huge.
 		 */
 		get_area = shmem_get_unmapped_area;
-	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-		/* Ensures that larger anonymous mappings are THP aligned. */
-		get_area = thp_get_unmapped_area;
 	}
 
 	/* Always treat pgoff as zero for anonymous memory. */
 	if (!file)
 		pgoff = 0;
 
-	if (get_area)
+	if (get_area) {
 		addr = get_area(file, addr, len, pgoff, flags);
-	else
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		/* Ensures that larger anonymous mappings are THP aligned. */
+		addr = thp_get_unmapped_area_vmflags(file, addr, len,
+						     pgoff, flags, vm_flags);
+	} else {
 		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
 						    pgoff, flags, vm_flags);
+	}
 	if (IS_ERR_VALUE(addr))
 		return addr;
 
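As the commit message notes, a future change wires thp_get_unmapped_area_vmflags() into the do_mmap() path so shadow stack placement can account for its start guard gap. One way a vm_flags-aware search can do that is to look for a hole big enough for the mapping plus its gap and return the base offset by the gap; a standalone sketch under that assumption (place_with_start_gap is a hypothetical helper, not kernel API):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Place a mapping of `len` bytes inside the free hole [lo, hi) while also
 * reserving `start_gap` bytes of guard gap below it. Counting the gap as
 * part of the search keeps it inside space known to be free. */
static unsigned long place_with_start_gap(unsigned long lo, unsigned long hi,
					  unsigned long len,
					  unsigned long start_gap)
{
	if (hi - lo < len + start_gap)
		return 0;	/* hole too small once the gap is counted */
	return lo + start_gap;
}

int main(void)
{
	unsigned long lo = 0x7f0000000000UL;

	/* A one-page hole cannot host a one-page mapping plus its gap... */
	printf("1-page hole: %#lx\n",
	       place_with_start_gap(lo, lo + PAGE_SIZE, PAGE_SIZE, PAGE_SIZE));

	/* ...but a two-page hole can: the mapping lands one page up. */
	printf("2-page hole: %#lx\n",
	       place_with_start_gap(lo, lo + 2 * PAGE_SIZE, PAGE_SIZE, PAGE_SIZE));
	return 0;
}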
