
Commit ed61406

sidkumar99 authored and Sasha Levin committed
mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios
[ Upstream commit d4ab031 ]

Continue to use a folio inside free_huge_page() by converting
hugetlb_cgroup_uncharge_page*() to folios.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Reviewed-by: Muchun Song <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Bui Quang Minh <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Mina Almasry <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Stable-dep-of: b76b469 ("mm/hugetlb: fix missing hugetlb_lock for resv uncharge")
Signed-off-by: Sasha Levin <[email protected]>
1 parent 29f671b commit ed61406

3 files changed: +27, -25 lines
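For reference, the caller-side pattern after this conversion looks roughly like the sketch below: a caller that still holds a struct page resolves the folio once with page_folio() and hands it to the folio-based uncharge helpers while holding hugetlb_lock, mirroring what free_huge_page() and alloc_huge_page() do in the hunks that follow. This is an illustrative kernel-context sketch assembled from this patch, not a standalone program, and the wrapper name uncharge_hugetlb_folio_cgroups() is hypothetical.

/* Illustrative sketch only -- mirrors the call pattern introduced below.
 * The wrapper name is hypothetical; the helpers it calls are the ones
 * converted by this patch.
 */
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

static void uncharge_hugetlb_folio_cgroups(struct hstate *h, struct page *page)
{
	/* Resolve the folio once instead of passing the raw page around. */
	struct folio *folio = page_folio(page);
	unsigned long flags;

	/* Both uncharge helpers expect hugetlb_lock to be held. */
	spin_lock_irqsave(&hugetlb_lock, flags);
	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				      pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					   pages_per_huge_page(h), folio);
	spin_unlock_irqrestore(&hugetlb_lock, flags);
}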

include/linux/hugetlb_cgroup.h

Lines changed: 8 additions & 8 deletions
@@ -158,10 +158,10 @@ extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					 struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-					      struct page *page);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					  struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					       struct folio *folio);
 
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					    struct hugetlb_cgroup *h_cg);
@@ -254,14 +254,14 @@ hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-						struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+						 struct folio *folio)
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
						      unsigned long nr_pages,
-						     struct page *page)
+						     struct folio *folio)
 {
 }
 static inline void hugetlb_cgroup_uncharge_cgroup(int idx,

mm/hugetlb.c

Lines changed: 9 additions & 6 deletions
@@ -1956,10 +1956,10 @@ void free_huge_page(struct page *page)
 
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	folio_clear_hugetlb_migratable(folio);
-	hugetlb_cgroup_uncharge_page(hstate_index(h),
-				     pages_per_huge_page(h), page);
-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					  pages_per_huge_page(h), page);
+	hugetlb_cgroup_uncharge_folio(hstate_index(h),
+				      pages_per_huge_page(h), folio);
+	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					   pages_per_huge_page(h), folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
@@ -3082,6 +3082,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
+	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
 	int ret, idx;
@@ -3145,6 +3146,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * a reservation exists for the allocation.
 	 */
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
+
 	if (!page) {
 		spin_unlock_irq(&hugetlb_lock);
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
@@ -3159,6 +3161,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		set_page_refcounted(page);
 		/* Fall through */
 	}
+	folio = page_folio(page);
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
@@ -3188,8 +3191,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
 		hugetlb_acct_memory(h, -rsv_adjust);
 		if (deferred_reserve)
-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					pages_per_huge_page(h), page);
+			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					pages_per_huge_page(h), folio);
 	}
 	return page;

mm/hugetlb_cgroup.c

Lines changed: 10 additions & 11 deletions
@@ -346,11 +346,10 @@ void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 /*
  * Should be called with hugetlb_lock held
  */
-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					   struct page *page, bool rsvd)
+static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					    struct folio *folio, bool rsvd)
 {
 	struct hugetlb_cgroup *h_cg;
-	struct folio *folio = page_folio(page);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -368,27 +367,27 @@ static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 		css_put(&h_cg->css);
 	else {
 		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
 		/*
 		 * This write is not atomic due to fetching usage and writing
 		 * to it, but that's fine because we call this with
 		 * hugetlb_lock held anyway.
 		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
 }
 
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-				  struct page *page)
+void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+				   struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
 }
 
-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-				       struct page *page)
+void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
 }
 
 static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
