@@ -722,11 +722,10 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
  */
 struct page *dax_layout_busy_page(struct address_space *mapping)
 {
-	pgoff_t	indices[PAGEVEC_SIZE];
+	XA_STATE(xas, &mapping->i_pages, 0);
+	void *entry;
+	unsigned int scanned = 0;
 	struct page *page = NULL;
-	struct pagevec pvec;
-	pgoff_t	index, end;
-	unsigned i;
 
 	/*
 	 * In the 'limited' case get_user_pages() for dax is disabled.
@@ -737,13 +736,9 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 		return NULL;
 
-	pagevec_init(&pvec);
-	index = 0;
-	end = -1;
-
 	/*
 	 * If we race get_user_pages_fast() here either we'll see the
-	 * elevated page count in the pagevec_lookup and wait, or
+	 * elevated page count in the iteration and wait, or
 	 * get_user_pages_fast() will see that the page it took a reference
 	 * against is no longer mapped in the page tables and bail to the
 	 * get_user_pages() slow path.  The slow path is protected by
@@ -755,43 +750,26 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 */
 	unmap_mapping_range(mapping, 0, 0, 1);
 
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *pvec_ent = pvec.pages[i];
-			void *entry;
-
-			index = indices[i];
-			if (index >= end)
-				break;
-
-			if (WARN_ON_ONCE(!xa_is_value(pvec_ent)))
-				continue;
-
-			xa_lock_irq(&mapping->i_pages);
-			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
-				page = dax_busy_page(entry);
-			put_unlocked_mapping_entry(mapping, index, entry);
-			xa_unlock_irq(&mapping->i_pages);
-			if (page)
-				break;
-		}
-
-		/*
-		 * We don't expect normal struct page entries to exist in our
-		 * tree, but we keep these pagevec calls so that this code is
-		 * consistent with the common pattern for handling pagevecs
-		 * throughout the kernel.
-		 */
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-
+	xas_lock_irq(&xas);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		if (WARN_ON_ONCE(!xa_is_value(entry)))
+			continue;
+		if (unlikely(dax_is_locked(entry)))
+			entry = get_unlocked_entry(&xas);
+		if (entry)
+			page = dax_busy_page(entry);
+		put_unlocked_entry(&xas, entry);
 		if (page)
 			break;
+		if (++scanned % XA_CHECK_SCHED)
+			continue;
+
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		cond_resched();
+		xas_lock_irq(&xas);
 	}
+	xas_unlock_irq(&xas);
 	return page;
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
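
The new loop follows the common XArray long-walk pattern: iterate under xas_lock_irq() and, every XA_CHECK_SCHED entries, pause the walk, drop the lock, and cond_resched() before picking up where it left off. A minimal sketch of that pattern in isolation is below, assuming it is built in kernel context; the xarray argument, the entry_is_busy() callback and the SCAN_RESCHED_INTERVAL constant are illustrative placeholders, not part of this commit.

#include <linux/xarray.h>
#include <linux/sched.h>

/* Assumed interval; the patch above uses the kernel's XA_CHECK_SCHED. */
#define SCAN_RESCHED_INTERVAL	4096

/*
 * Walk every entry in @xa looking for one that @entry_is_busy() accepts,
 * yielding the CPU periodically so a long walk does not hog the lock.
 */
static void *find_busy_entry(struct xarray *xa,
			     bool (*entry_is_busy)(void *entry))
{
	XA_STATE(xas, xa, 0);
	unsigned int scanned = 0;
	void *found = NULL;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (entry_is_busy(entry)) {
			found = entry;
			break;
		}
		/* Only break out of the walk every SCAN_RESCHED_INTERVAL entries. */
		if (++scanned % SCAN_RESCHED_INTERVAL)
			continue;

		/*
		 * xas_pause() records the current position so the next
		 * xas_for_each() iteration restarts safely after the lock
		 * has been dropped and other writers may have run.
		 */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return found;
}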