Commit 3e9a13d

Authored by Matthew Wilcox (Oracle), committed by akpm00
huge_memory: convert split_huge_page_to_list() to use a folio
Saves many calls to compound_head().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Parent: c33db29
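Why the conversion saves work: every PageFoo(page) test must first resolve a possibly-tail page to its compound head, whereas a struct folio can never be a tail page, so folio_test_foo(folio) reads the flags directly. Below is a minimal user-space sketch of that pattern; all definitions are simplified stand-ins for illustration, not the real kernel ones.

/*
 * Simplified model: in the kernel, page->compound_head stores the head
 * pointer with bit 0 set for tail pages; a folio is a head page by type.
 */
#include <stdio.h>

struct page {
        unsigned long flags;
        unsigned long compound_head;    /* head pointer | 1 if tail page */
};

struct folio {
        struct page page;               /* always a head page */
};

static struct page *compound_head(struct page *page)
{
        unsigned long head = page->compound_head;

        if (head & 1)                   /* tail page: chase the head pointer */
                return (struct page *)(head - 1);
        return page;
}

/* page API: pays for a compound_head() lookup on every test */
static int PageLocked(struct page *page)
{
        return compound_head(page)->flags & 1;
}

/* folio API: the head lookup already happened once, in page_folio() */
static int folio_test_locked(struct folio *folio)
{
        return folio->page.flags & 1;
}

int main(void)
{
        struct page head = { .flags = 1, .compound_head = 0 };
        struct page tail = { .compound_head = (unsigned long)&head | 1 };
        struct folio *folio = (struct folio *)compound_head(&tail);

        printf("PageLocked(tail):         %d\n", PageLocked(&tail));
        printf("folio_test_locked(folio): %d\n", folio_test_locked(folio));
        return 0;
}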

1 file changed: +24 −25 lines

mm/huge_memory.c (24 additions, 25 deletions)
@@ -2622,27 +2622,26 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
         struct folio *folio = page_folio(page);
-        struct page *head = &folio->page;
-        struct deferred_split *ds_queue = get_deferred_split_queue(head);
-        XA_STATE(xas, &head->mapping->i_pages, head->index);
+        struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
+        XA_STATE(xas, &folio->mapping->i_pages, folio->index);
         struct anon_vma *anon_vma = NULL;
         struct address_space *mapping = NULL;
         int extra_pins, ret;
         pgoff_t end;
         bool is_hzp;
 
-        VM_BUG_ON_PAGE(!PageLocked(head), head);
-        VM_BUG_ON_PAGE(!PageCompound(head), head);
+        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
-        is_hzp = is_huge_zero_page(head);
-        VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+        is_hzp = is_huge_zero_page(&folio->page);
+        VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
         if (is_hzp)
                 return -EBUSY;
 
-        if (PageWriteback(head))
+        if (folio_test_writeback(folio))
                 return -EBUSY;
 
-        if (PageAnon(head)) {
+        if (folio_test_anon(folio)) {
                 /*
                  * The caller does not necessarily hold an mmap_lock that would
                  * prevent the anon_vma disappearing so we first we take a
@@ -2651,7 +2650,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                  * is taken to serialise against parallel split or collapse
                  * operations.
                  */
-                anon_vma = page_get_anon_vma(head);
+                anon_vma = page_get_anon_vma(&folio->page);
                 if (!anon_vma) {
                         ret = -EBUSY;
                         goto out;
@@ -2662,7 +2661,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         } else {
                 gfp_t gfp;
 
-                mapping = head->mapping;
+                mapping = folio->mapping;
 
                 /* Truncated ? */
                 if (!mapping) {
@@ -2679,7 +2678,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                         goto out;
                 }
 
-                xas_split_alloc(&xas, head, compound_order(head), gfp);
+                xas_split_alloc(&xas, folio, folio_order(folio), gfp);
                 if (xas_error(&xas)) {
                         ret = xas_error(&xas);
                         goto out;
@@ -2693,7 +2692,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                  * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
                  * which cannot be nested inside the page tree lock. So note
                  * end now: i_size itself may be changed at any moment, but
-                 * head page lock is good enough to serialize the trimming.
+                 * folio lock is good enough to serialize the trimming.
                  */
                 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
                 if (shmem_mapping(mapping))
@@ -2709,38 +2708,38 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                 goto out_unlock;
         }
 
-        unmap_page(head);
+        unmap_page(&folio->page);
 
         /* block interrupt reentry in xa_lock and spinlock */
         local_irq_disable();
         if (mapping) {
                 /*
-                 * Check if the head page is present in page cache.
-                 * We assume all tail are present too, if head is there.
+                 * Check if the folio is present in page cache.
+                 * We assume all tail are present too, if folio is there.
                  */
                 xas_lock(&xas);
                 xas_reset(&xas);
-                if (xas_load(&xas) != head)
+                if (xas_load(&xas) != folio)
                         goto fail;
         }
 
         /* Prevent deferred_split_scan() touching ->_refcount */
         spin_lock(&ds_queue->split_queue_lock);
-        if (page_ref_freeze(head, 1 + extra_pins)) {
-                if (!list_empty(page_deferred_list(head))) {
+        if (folio_ref_freeze(folio, 1 + extra_pins)) {
+                if (!list_empty(page_deferred_list(&folio->page))) {
                         ds_queue->split_queue_len--;
-                        list_del(page_deferred_list(head));
+                        list_del(page_deferred_list(&folio->page));
                 }
                 spin_unlock(&ds_queue->split_queue_lock);
                 if (mapping) {
-                        int nr = thp_nr_pages(head);
+                        int nr = folio_nr_pages(folio);
 
-                        xas_split(&xas, head, thp_order(head));
-                        if (PageSwapBacked(head)) {
-                                __mod_lruvec_page_state(head, NR_SHMEM_THPS,
+                        xas_split(&xas, folio, folio_order(folio));
+                        if (folio_test_swapbacked(folio)) {
+                                __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
                                                         -nr);
                         } else {
-                                __mod_lruvec_page_state(head, NR_FILE_THPS,
+                                __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
                                                         -nr);
                                 filemap_nr_thps_dec(mapping);
                         }
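The calling contract visible in this diff: the caller must hold the folio lock (enforced by the VM_BUG_ON_FOLIO above) and a reference, and a NULL list (what the split_huge_page() wrapper passes) leaves the split-out tail pages on the LRU. A hedged caller-side sketch follows; try_split() is a hypothetical helper for illustration, not a kernel function.

/* Hypothetical helper showing the expected call pattern. */
static int try_split(struct page *page)
{
        int ret;

        lock_page(page);        /* splitting requires the folio locked */
        ret = split_huge_page_to_list(page, NULL);
        unlock_page(page);

        /* -EBUSY: e.g. huge zero page, under writeback, or extra pins */
        return ret;
}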
