Commit 084a899

Author: Matthew Wilcox (committed)

dax: Convert dax_layout_busy_page to XArray

Instead of using a pagevec, just use the XArray iterators.  Add a
conditional rescheduling point which probably should have been there
in the original.

Signed-off-by: Matthew Wilcox <[email protected]>
Parent: cfc93c6

1 file changed, 21 insertions(+), 43 deletions(-)


fs/dax.c

@@ -722,11 +722,10 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
  */
 struct page *dax_layout_busy_page(struct address_space *mapping)
 {
-        pgoff_t indices[PAGEVEC_SIZE];
+        XA_STATE(xas, &mapping->i_pages, 0);
+        void *entry;
+        unsigned int scanned = 0;
         struct page *page = NULL;
-        struct pagevec pvec;
-        pgoff_t index, end;
-        unsigned i;
 
         /*
          * In the 'limited' case get_user_pages() for dax is disabled.
@@ -737,13 +736,9 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
         if (!dax_mapping(mapping) || !mapping_mapped(mapping))
                 return NULL;
 
-        pagevec_init(&pvec);
-        index = 0;
-        end = -1;
-
         /*
          * If we race get_user_pages_fast() here either we'll see the
-         * elevated page count in the pagevec_lookup and wait, or
+         * elevated page count in the iteration and wait, or
          * get_user_pages_fast() will see that the page it took a reference
          * against is no longer mapped in the page tables and bail to the
          * get_user_pages() slow path.  The slow path is protected by
@@ -755,43 +750,26 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
          */
         unmap_mapping_range(mapping, 0, 0, 1);
 
-        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                                indices)) {
-                for (i = 0; i < pagevec_count(&pvec); i++) {
-                        struct page *pvec_ent = pvec.pages[i];
-                        void *entry;
-
-                        index = indices[i];
-                        if (index >= end)
-                                break;
-
-                        if (WARN_ON_ONCE(!xa_is_value(pvec_ent)))
-                                continue;
-
-                        xa_lock_irq(&mapping->i_pages);
-                        entry = get_unlocked_mapping_entry(mapping, index, NULL);
-                        if (entry)
-                                page = dax_busy_page(entry);
-                        put_unlocked_mapping_entry(mapping, index, entry);
-                        xa_unlock_irq(&mapping->i_pages);
-                        if (page)
-                                break;
-                }
-
-                /*
-                 * We don't expect normal struct page entries to exist in our
-                 * tree, but we keep these pagevec calls so that this code is
-                 * consistent with the common pattern for handling pagevecs
-                 * throughout the kernel.
-                 */
-                pagevec_remove_exceptionals(&pvec);
-                pagevec_release(&pvec);
-                index++;
-
+        xas_lock_irq(&xas);
+        xas_for_each(&xas, entry, ULONG_MAX) {
+                if (WARN_ON_ONCE(!xa_is_value(entry)))
+                        continue;
+                if (unlikely(dax_is_locked(entry)))
+                        entry = get_unlocked_entry(&xas);
+                if (entry)
+                        page = dax_busy_page(entry);
+                put_unlocked_entry(&xas, entry);
                 if (page)
                         break;
+                if (++scanned % XA_CHECK_SCHED)
+                        continue;
+
+                xas_pause(&xas);
+                xas_unlock_irq(&xas);
+                cond_resched();
+                xas_lock_irq(&xas);
         }
+        xas_unlock_irq(&xas);
         return page;
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
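
For reference, here is a minimal standalone sketch (not part of the commit) of the iterate-and-reschedule idiom the new code uses: walk an XArray under xa_lock, and every XA_CHECK_SCHED entries pause the cursor, drop the lock, and call cond_resched() so a long scan cannot hog the CPU with the lock held and interrupts disabled. The function count_value_entries() is hypothetical, invented for this example; the XArray calls themselves (XA_STATE, xas_for_each, xas_pause, xas_lock_irq/xas_unlock_irq) are the real kernel API.

/*
 * Hypothetical example, not from fs/dax.c: count the value entries in
 * an XArray while yielding the CPU periodically, following the same
 * pattern dax_layout_busy_page() uses after this commit.
 */
#include <linux/xarray.h>
#include <linux/sched.h>

static unsigned long count_value_entries(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);           /* cursor starting at index 0 */
        void *entry;
        unsigned int scanned = 0;
        unsigned long count = 0;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, ULONG_MAX) {
                if (xa_is_value(entry)) /* DAX stores value entries */
                        count++;
                if (++scanned % XA_CHECK_SCHED)
                        continue;

                /*
                 * Every XA_CHECK_SCHED entries: make the cursor safe
                 * to resume, drop the lock so writers can get in, and
                 * give the scheduler a chance to run something else.
                 */
                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
        return count;
}

The xas_pause() call is what makes dropping the lock safe: it turns the xa_state from a pointer into the tree into a position to restart from, so a concurrent modification between the unlock and the relock cannot leave the cursor dangling.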
