
Commit 077fcf1

kvaneesh authored and torvalds committed
mm/thp: allocate transparent hugepages on local node
This makes sure that we try to allocate hugepages from the local node if
allowed by mempolicy.  If we can't, we fall back to small page allocation
based on mempolicy.  This is based on the observation that allocating pages
on the local node is more beneficial than allocating hugepages on a remote
node.

With this patch applied we may find transparent huge page allocation
failures if the current node doesn't have enough free hugepages.  Before
this patch such failures resulted in us retrying the allocation on other
nodes in the NUMA node mask.

[[email protected]: fix comment, add CONFIG_TRANSPARENT_HUGEPAGE dependency]
Signed-off-by: Aneesh Kumar K.V <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 24e2716 commit 077fcf1

3 files changed: 85 additions, 15 deletions

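Before the per-file hunks, here is a minimal sketch of the fault-path pattern this change introduces, pieced together from the mm/huge_memory.c hunks below (editorial illustration, not additional code from the commit):

	gfp_t gfp;
	struct page *page;

	/* Build the gfp mask from the per-VMA defrag setting ... */
	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
	/* ... and try to allocate the hugepage on the local node only. */
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		/*
		 * No local hugepage: count the fallback event and let the
		 * fault path retry with small pages under the usual
		 * mempolicy rules.
		 */
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}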

include/linux/gfp.h

Lines changed: 4 additions & 0 deletions
@@ -335,11 +335,15 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			int node);
+extern struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
+				unsigned long addr, int order);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
 #define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
 	alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
+	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
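On builds that take the #else branch above (in gfp.h this is the !CONFIG_NUMA case), the new macro simply collapses to a plain allocation of the requested order. A rough sketch of the preprocessor expansion, under that assumption:

	/* With the #else definitions above, a call like this ... */
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	/* ... expands to the ordinary allocator, ignoring vma and addr: */
	page = alloc_pages(gfp, HPAGE_PMD_ORDER);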

mm/huge_memory.c

Lines changed: 9 additions & 15 deletions
@@ -761,15 +761,6 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
-static inline struct page *alloc_hugepage_vma(int defrag,
-					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd,
-					      gfp_t extra_gfp)
-{
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
-			       HPAGE_PMD_ORDER, vma, haddr, nd);
-}
-
 /* Caller must hold page table lock. */
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
@@ -790,6 +781,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			       unsigned long address, pmd_t *pmd,
 			       unsigned int flags)
 {
+	gfp_t gfp;
 	struct page *page;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 
@@ -824,8 +816,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		return 0;
 	}
-	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-			vma, haddr, numa_node_id(), 0);
+	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1113,10 +1105,12 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
-	    !transparent_hugepage_debug_cow())
-		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id(), 0);
-	else
+	    !transparent_hugepage_debug_cow()) {
+		gfp_t gfp;
+
+		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	} else
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {

mm/mempolicy.c

Lines changed: 72 additions & 0 deletions
@@ -2030,6 +2030,78 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	return page;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * alloc_hugepage_vma: Allocate a hugepage for a VMA
+ * @gfp:
+ *   %GFP_USER    user allocation.
+ *   %GFP_KERNEL  kernel allocations,
+ *   %GFP_HIGHMEM highmem/user allocations,
+ *   %GFP_FS      allocation should not call back into a file system.
+ *   %GFP_ATOMIC  don't sleep.
+ *
+ * @vma:   Pointer to VMA or NULL if not available.
+ * @addr:  Virtual Address of the allocation. Must be inside the VMA.
+ * @order: Order of the hugepage for gfp allocation.
+ *
+ * This function allocates a huge page from the kernel page pool and applies
+ * a NUMA policy associated with the VMA or the current process.
+ * For policies other than %MPOL_INTERLEAVE, we make sure we allocate the hugepage
+ * only from the current node if the current node is part of the node mask.
+ * If we can't allocate a hugepage we fail the allocation and don't try to fall back
+ * to other nodes in the node mask. If the current node is not part of the node mask
+ * or if the NUMA policy is MPOL_INTERLEAVE we use the allocator that can
+ * fall back to nodes in the policy node mask.
+ *
+ * When VMA is not NULL the caller must hold down_read on the mmap_sem of the
+ * mm_struct of the VMA to prevent it from going away. Should be used for
+ * all allocations for pages that will be mapped into
+ * user space. Returns NULL when no page can be allocated.
+ *
+ * Should be called with vma->vm_mm->mmap_sem held.
+ *
+ */
+struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
+				unsigned long addr, int order)
+{
+	struct page *page;
+	nodemask_t *nmask;
+	struct mempolicy *pol;
+	int node = numa_node_id();
+	unsigned int cpuset_mems_cookie;
+
+retry_cpuset:
+	pol = get_vma_policy(vma, addr);
+	cpuset_mems_cookie = read_mems_allowed_begin();
+	/*
+	 * For interleave policy, we don't worry about
+	 * current node. Otherwise if current node is
+	 * in nodemask, try to allocate hugepage from
+	 * the current node. Don't fall back to other nodes
+	 * for THP.
+	 */
+	if (unlikely(pol->mode == MPOL_INTERLEAVE))
+		goto alloc_with_fallback;
+	nmask = policy_nodemask(gfp, pol);
+	if (!nmask || node_isset(node, *nmask)) {
+		mpol_cond_put(pol);
+		page = alloc_pages_exact_node(node, gfp, order);
+		if (unlikely(!page &&
+			     read_mems_allowed_retry(cpuset_mems_cookie)))
+			goto retry_cpuset;
+		return page;
+	}
+alloc_with_fallback:
+	mpol_cond_put(pol);
+	/*
+	 * If current node is not part of the node mask, try
+	 * the allocation from any node, and we can do retry
+	 * in that case.
+	 */
+	return alloc_pages_vma(gfp, order, vma, addr, node);
+}
+#endif
+
 /**
  * alloc_pages_current - Allocate pages.
  *
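As a rough, editorial illustration of the policy cases the new helper handles (not part of the commit; assume the faulting task is running on node 1):

	/*
	 * MPOL_DEFAULT / MPOL_PREFERRED: policy_nodemask() returns NULL
	 *     -> hugepage attempted on node 1 only, no THP fallback to other nodes
	 * MPOL_BIND to {1,2}: node 1 is in the mask
	 *     -> hugepage attempted on node 1 only, no THP fallback to other nodes
	 * MPOL_BIND to {2,3}: node 1 is not in the mask
	 *     -> alloc_pages_vma() path, hugepage may land on nodes 2-3
	 * MPOL_INTERLEAVE: fallback branch taken unconditionally
	 *     -> alloc_pages_vma() path, interleaving behaves as before
	 */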