@@ -2622,27 +2622,26 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct folio *folio = page_folio(page);
-	struct page *head = &folio->page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(head);
-	XA_STATE(xas, &head->mapping->i_pages, head->index);
+	struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
+	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
 
-	VM_BUG_ON_PAGE(!PageLocked(head), head);
-	VM_BUG_ON_PAGE(!PageCompound(head), head);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
-	is_hzp = is_huge_zero_page(head);
-	VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+	is_hzp = is_huge_zero_page(&folio->page);
+	VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
 	if (is_hzp)
 		return -EBUSY;
 
-	if (PageWriteback(head))
+	if (folio_test_writeback(folio))
 		return -EBUSY;
 
-	if (PageAnon(head)) {
+	if (folio_test_anon(folio)) {
 		/*
 		 * The caller does not necessarily hold an mmap_lock that would
 		 * prevent the anon_vma disappearing so we first we take a
@@ -2651,7 +2650,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		 * is taken to serialise against parallel split or collapse
 		 * operations.
 		 */
-		anon_vma = page_get_anon_vma(head);
+		anon_vma = page_get_anon_vma(&folio->page);
 		if (!anon_vma) {
 			ret = -EBUSY;
 			goto out;
@@ -2662,7 +2661,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	} else {
 		gfp_t gfp;
 
-		mapping = head->mapping;
+		mapping = folio->mapping;
 
 		/* Truncated ? */
 		if (!mapping) {
@@ -2679,7 +2678,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 			goto out;
 		}
 
-		xas_split_alloc(&xas, head, compound_order(head), gfp);
+		xas_split_alloc(&xas, folio, folio_order(folio), gfp);
 		if (xas_error(&xas)) {
 			ret = xas_error(&xas);
 			goto out;
@@ -2693,7 +2692,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
 		 * which cannot be nested inside the page tree lock. So note
 		 * end now: i_size itself may be changed at any moment, but
-		 * head page lock is good enough to serialize the trimming.
+		 * folio lock is good enough to serialize the trimming.
 		 */
 		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
 		if (shmem_mapping(mapping))
@@ -2709,38 +2708,38 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		goto out_unlock;
 	}
 
-	unmap_page(head);
+	unmap_page(&folio->page);
 
 	/* block interrupt reentry in xa_lock and spinlock */
 	local_irq_disable();
 	if (mapping) {
 		/*
-		 * Check if the head page is present in page cache.
-		 * We assume all tail are present too, if head is there.
+		 * Check if the folio is present in page cache.
+		 * We assume all tail are present too, if folio is there.
 		 */
 		xas_lock(&xas);
 		xas_reset(&xas);
-		if (xas_load(&xas) != head)
+		if (xas_load(&xas) != folio)
 			goto fail;
 	}
 
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	spin_lock(&ds_queue->split_queue_lock);
-	if (page_ref_freeze(head, 1 + extra_pins)) {
-		if (!list_empty(page_deferred_list(head))) {
+	if (folio_ref_freeze(folio, 1 + extra_pins)) {
+		if (!list_empty(page_deferred_list(&folio->page))) {
 			ds_queue->split_queue_len--;
-			list_del(page_deferred_list(head));
+			list_del(page_deferred_list(&folio->page));
 		}
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
-			int nr = thp_nr_pages(head);
+			int nr = folio_nr_pages(folio);
 
-			xas_split(&xas, head, thp_order(head));
-			if (PageSwapBacked(head)) {
-				__mod_lruvec_page_state(head, NR_SHMEM_THPS,
+			xas_split(&xas, folio, folio_order(folio));
+			if (folio_test_swapbacked(folio)) {
+				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
 							-nr);
 			} else {
-				__mod_lruvec_page_state(head, NR_FILE_THPS,
+				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
 							-nr);
 				filemap_nr_thps_dec(mapping);
 			}
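
A minimal sketch, not part of the patch, showing how the old head-page idioms correspond to the folio calls used above. try_split() is a hypothetical helper; it assumes the caller already holds a reference on the page and that the kernel tree includes this folio conversion.

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/page-flags.h>
#include <linux/printk.h>

/* Hypothetical illustration only; not code from this commit. */
static int try_split(struct page *page)
{
	struct folio *folio = page_folio(page);	/* folio containing @page */

	/* Old idiom: PageLocked(head) / PageWriteback(head). */
	if (!folio_test_locked(folio) || folio_test_writeback(folio))
		return -EBUSY;

	/* Old idiom: thp_nr_pages(head). */
	pr_debug("splitting %ld-page folio\n", folio_nr_pages(folio));

	/* Same entry point as the diff; NULL list, as split_huge_page() passes. */
	return split_huge_page_to_list(&folio->page, NULL);
}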