Commit 9f32d22

Author: Matthew Wilcox (committed)

dax: Convert dax_lock_mapping_entry to XArray

Instead of always retrying when we slept, only retry if the page has moved.

Signed-off-by: Matthew Wilcox <[email protected]>

1 parent 9fc747f · commit 9f32d22
File tree: 1 file changed (+35, -48 lines)

fs/dax.c

Lines changed: 35 additions & 48 deletions
@@ -99,6 +99,17 @@ static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 			DAX_LOCKED);
 }
 
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+{
+	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+}
+
+static void *dax_make_page_entry(struct page *page)
+{
+	pfn_t pfn = page_to_pfn_t(page);
+	return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
+}
+
 static bool dax_is_locked(void *entry)
 {
 	return xa_to_value(entry) & DAX_LOCKED;
@@ -487,33 +498,16 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
-	pgoff_t index;
-	struct inode *inode;
-	bool did_lock = false;
-	void *entry = NULL, **slot;
-	struct address_space *mapping;
+	XA_STATE(xas, NULL, 0);
+	void *entry;
 
-	rcu_read_lock();
 	for (;;) {
-		mapping = READ_ONCE(page->mapping);
+		struct address_space *mapping = READ_ONCE(page->mapping);
 
 		if (!dax_mapping(mapping))
-			break;
+			return false;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -522,47 +516,40 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		inode = mapping->host;
-		if (S_ISCHR(inode->i_mode)) {
-			did_lock = true;
-			break;
-		}
+		if (S_ISCHR(mapping->host->i_mode))
+			return true;
 
-		xa_lock_irq(&mapping->i_pages);
+		xas.xa = &mapping->i_pages;
+		xas_lock_irq(&xas);
 		if (mapping != page->mapping) {
-			xa_unlock_irq(&mapping->i_pages);
+			xas_unlock_irq(&xas);
 			continue;
 		}
-		index = page->index;
-
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
-		if (!entry) {
-			xa_unlock_irq(&mapping->i_pages);
-			break;
-		} else if (IS_ERR(entry)) {
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
-			continue;
+		xas_set(&xas, page->index);
+		entry = xas_load(&xas);
+		if (dax_is_locked(entry)) {
+			entry = get_unlocked_entry(&xas);
+			/* Did the page move while we slept? */
+			if (dax_to_pfn(entry) != page_to_pfn(page)) {
+				xas_unlock_irq(&xas);
+				continue;
+			}
 		}
-		lock_slot(mapping, slot);
-		did_lock = true;
-		xa_unlock_irq(&mapping->i_pages);
-		break;
+		dax_lock_entry(&xas, entry);
+		xas_unlock_irq(&xas);
+		return true;
 	}
-	rcu_read_unlock();
-
-	return did_lock;
 }
 
 void dax_unlock_mapping_entry(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
-	struct inode *inode = mapping->host;
+	XA_STATE(xas, &mapping->i_pages, page->index);
 
-	if (S_ISCHR(inode->i_mode))
+	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	unlock_mapping_entry(mapping, page->index);
+	dax_unlock_entry(&xas, dax_make_page_entry(page));
 }
 
 /*
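For readers who want the control flow in isolation, here is a minimal user-space sketch of the retry policy the new dax_lock_mapping_entry() adopts: wait for a locked entry, then retry the whole lookup only if the entry's pfn no longer matches the page. This is illustrative only; every toy_* name is a hypothetical stand-in, not a kernel API.

/*
 * Toy model of "only retry if the page has moved" (not kernel code).
 * A single slot stands in for the XArray entry at one index.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_LOCKED 1UL
#define TOY_SHIFT  1

struct toy_page { unsigned long pfn; };

static unsigned long toy_entry;	/* models the slot for one index */

static unsigned long toy_to_pfn(unsigned long entry)
{
	return entry >> TOY_SHIFT;
}

static bool toy_is_locked(unsigned long entry)
{
	return entry & TOY_LOCKED;
}

/* Stand-in for get_unlocked_entry(): pretend we slept, then reread the slot. */
static unsigned long toy_wait_unlocked(void)
{
	return toy_entry &= ~TOY_LOCKED;
}

static bool toy_lock_mapping_entry(struct toy_page *page)
{
	for (;;) {
		unsigned long entry = toy_entry;

		if (toy_is_locked(entry)) {
			entry = toy_wait_unlocked();
			/* Did the page move while we slept? */
			if (toy_to_pfn(entry) != page->pfn)
				continue;	/* only then do we retry */
		}
		toy_entry = entry | TOY_LOCKED;	/* lock the entry */
		return true;
	}
}

int main(void)
{
	struct toy_page page = { .pfn = 42 };

	toy_entry = (page.pfn << TOY_SHIFT) | TOY_LOCKED;	/* starts locked */
	printf("locked: %d\n", toy_lock_mapping_entry(&page));
	return 0;
}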
