Skip to content

Commit 08cf9fa

Browse files
minatorvalds
authored and committed
hugetlb_cgroup: support noreserve mappings
Support MAP_NORESERVE accounting as part of the new counter. For each hugepage allocation, at allocation time we check if there is a reservation for this allocation or not. If there is a reservation for this allocation, then this allocation was charged at reservation time, and we don't re-account it. If there is no reservation for this allocation, we charge the appropriate hugetlb_cgroup. The hugetlb_cgroup to uncharge for this allocation is stored in page[3].private. We use new APIs added in an earlier patch to set this pointer. Signed-off-by: Mina Almasry <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Reviewed-by: Mike Kravetz <[email protected]> Cc: David Rientjes <[email protected]> Cc: Greg Thelen <[email protected]> Cc: Sandipan Das <[email protected]> Cc: Shakeel Butt <[email protected]> Cc: Shuah Khan <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent 075a61d commit 08cf9fa

File tree

1 file changed

+26
-1
lines changed

1 file changed

+26
-1
lines changed

mm/hugetlb.c

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1345,6 +1345,8 @@ static void __free_huge_page(struct page *page)
13451345
clear_page_huge_active(page);
13461346
hugetlb_cgroup_uncharge_page(hstate_index(h),
13471347
pages_per_huge_page(h), page);
1348+
hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1349+
pages_per_huge_page(h), page);
13481350
if (restore_reserve)
13491351
h->resv_huge_pages++;
13501352

@@ -2281,6 +2283,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
22812283
long gbl_chg;
22822284
int ret, idx;
22832285
struct hugetlb_cgroup *h_cg;
2286+
bool deferred_reserve;
22842287

22852288
idx = hstate_index(h);
22862289
/*
@@ -2318,9 +2321,19 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
23182321
gbl_chg = 1;
23192322
}
23202323

2324+
/* If this allocation is not consuming a reservation, charge it now.
2325+
*/
2326+
deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
2327+
if (deferred_reserve) {
2328+
ret = hugetlb_cgroup_charge_cgroup_rsvd(
2329+
idx, pages_per_huge_page(h), &h_cg);
2330+
if (ret)
2331+
goto out_subpool_put;
2332+
}
2333+
23212334
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
23222335
if (ret)
2323-
goto out_subpool_put;
2336+
goto out_uncharge_cgroup_reservation;
23242337

23252338
spin_lock(&hugetlb_lock);
23262339
/*
@@ -2343,6 +2356,14 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
23432356
/* Fall through */
23442357
}
23452358
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2359+
/* If allocation is not consuming a reservation, also store the
2360+
* hugetlb_cgroup pointer on the page.
2361+
*/
2362+
if (deferred_reserve) {
2363+
hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2364+
h_cg, page);
2365+
}
2366+
23462367
spin_unlock(&hugetlb_lock);
23472368

23482369
set_page_private(page, (unsigned long)spool);
@@ -2367,6 +2388,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
23672388

23682389
out_uncharge_cgroup:
23692390
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2391+
out_uncharge_cgroup_reservation:
2392+
if (deferred_reserve)
2393+
hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2394+
h_cg);
23702395
out_subpool_put:
23712396
if (map_chg || avoid_reserve)
23722397
hugepage_subpool_put_pages(spool, 1);

0 commit comments

Comments
 (0)