@@ -99,6 +99,17 @@ static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 			DAX_LOCKED);
 }
 
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+{
+	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+}
+
+static void *dax_make_page_entry(struct page *page)
+{
+	pfn_t pfn = page_to_pfn_t(page);
+	return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
+}
+
 static bool dax_is_locked(void *entry)
 {
 	return xa_to_value(entry) & DAX_LOCKED;
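The two new helpers pack a pfn and a few flag bits into a single tagged XArray value entry. A minimal userspace sketch of the round trip, assuming the DAX_SHIFT/flag layout defined near the top of fs/dax.c; mk_entry() and entry_to_pfn() are stand-ins for xa_mk_value() plus dax_make_entry(), and for dax_to_pfn(), so verify the constants against the tree you are working with:

#include <assert.h>
#include <stdio.h>

#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_SHIFT	4

static unsigned long mk_entry(unsigned long pfn, unsigned long flags)
{
	/* xa_mk_value() tags a word as a value entry: shift left one, set bit 0 */
	return ((flags | (pfn << DAX_SHIFT)) << 1) | 1;
}

static unsigned long entry_to_pfn(unsigned long entry)
{
	/* xa_to_value() undoes the tag; shifting again strips the flag bits */
	return (entry >> 1) >> DAX_SHIFT;
}

int main(void)
{
	unsigned long entry = mk_entry(0x12345, DAX_PMD);

	assert(entry_to_pfn(entry) == 0x12345);	/* pfn round-trips */
	assert((entry >> 1) & DAX_PMD);		/* flag survives */
	assert(!((entry >> 1) & DAX_LOCKED));	/* entries start unlocked */
	printf("entry %#lx -> pfn %#lx\n", entry, entry_to_pfn(entry));
	return 0;
}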
@@ -487,33 +498,16 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
-	pgoff_t index;
-	struct inode *inode;
-	bool did_lock = false;
-	void *entry = NULL, **slot;
-	struct address_space *mapping;
+	XA_STATE(xas, NULL, 0);
+	void *entry;
 
-	rcu_read_lock();
 	for (;;) {
-		mapping = READ_ONCE(page->mapping);
+		struct address_space *mapping = READ_ONCE(page->mapping);
 
 		if (!dax_mapping(mapping))
-			break;
+			return false;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -522,47 +516,40 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		inode = mapping->host;
-		if (S_ISCHR(inode->i_mode)) {
-			did_lock = true;
-			break;
-		}
+		if (S_ISCHR(mapping->host->i_mode))
+			return true;
 
-		xa_lock_irq(&mapping->i_pages);
+		xas.xa = &mapping->i_pages;
+		xas_lock_irq(&xas);
 		if (mapping != page->mapping) {
-			xa_unlock_irq(&mapping->i_pages);
+			xas_unlock_irq(&xas);
 			continue;
 		}
-		index = page->index;
-
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
-		if (!entry) {
-			xa_unlock_irq(&mapping->i_pages);
-			break;
-		} else if (IS_ERR(entry)) {
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
-			continue;
+		xas_set(&xas, page->index);
+		entry = xas_load(&xas);
+		if (dax_is_locked(entry)) {
+			entry = get_unlocked_entry(&xas);
+			/* Did the page move while we slept? */
+			if (dax_to_pfn(entry) != page_to_pfn(page)) {
+				xas_unlock_irq(&xas);
+				continue;
+			}
 		}
-		lock_slot(mapping, slot);
-		did_lock = true;
-		xa_unlock_irq(&mapping->i_pages);
-		break;
+		dax_lock_entry(&xas, entry);
+		xas_unlock_irq(&xas);
+		return true;
 	}
-	rcu_read_unlock();
-
-	return did_lock;
 }
 
 void dax_unlock_mapping_entry(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
-	struct inode *inode = mapping->host;
+	XA_STATE(xas, &mapping->i_pages, page->index);
 
-	if (S_ISCHR(inode->i_mode))
+	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	unlock_mapping_entry(mapping, page->index);
+	dax_unlock_entry(&xas, dax_make_page_entry(page));
 }
 
 /*
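Taken together, the pair lets a caller such as the memory-failure path pin the pfn-to-page association while it inspects the page. A hedged sketch of the expected calling pattern follows; inspect_dax_page() is a hypothetical illustration, not part of this commit:

#include <linux/dax.h>
#include <linux/mm.h>

/* Hypothetical caller: only dax_lock_mapping_entry() and
 * dax_unlock_mapping_entry() are real; everything else is illustration. */
static int inspect_dax_page(struct page *page)
{
	/* Fails if the page is no longer part of a DAX mapping. */
	if (!dax_lock_mapping_entry(page))
		return -EBUSY;

	/*
	 * The entry is now locked, so page->mapping and page->index
	 * cannot change underneath us until we unlock it.
	 */
	pr_info("dax pfn %#lx maps index %#lx\n",
		page_to_pfn(page), page->index);

	dax_unlock_mapping_entry(page);
	return 0;
}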