Skip to content

Commit 2f89dc1

Browse files
jankara authored and torvalds committed
dax: protect PTE modification on WP fault by radix tree entry lock
Currently PTE gets updated in wp_pfn_shared() after dax_pfn_mkwrite() has released corresponding radix tree entry lock. When we want to writeprotect PTE on cache flush, we need PTE modification to happen under radix tree entry lock to ensure consistent updates of PTE and radix tree (standard faults use page lock to ensure this consistency). So move update of PTE bit into dax_pfn_mkwrite(). Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Jan Kara <[email protected]> Reviewed-by: Ross Zwisler <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: Dan Williams <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent a6abc2c commit 2f89dc1

File tree

2 files changed

+17
-7
lines changed

2 files changed

+17
-7
lines changed

fs/dax.c

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -783,17 +783,27 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
783783
{
784784
struct file *file = vma->vm_file;
785785
struct address_space *mapping = file->f_mapping;
786-
void *entry;
786+
void *entry, **slot;
787787
pgoff_t index = vmf->pgoff;
788788

789789
spin_lock_irq(&mapping->tree_lock);
790-
entry = get_unlocked_mapping_entry(mapping, index, NULL);
791-
if (!entry || !radix_tree_exceptional_entry(entry))
792-
goto out;
790+
entry = get_unlocked_mapping_entry(mapping, index, &slot);
791+
if (!entry || !radix_tree_exceptional_entry(entry)) {
792+
if (entry)
793+
put_unlocked_mapping_entry(mapping, index, entry);
794+
spin_unlock_irq(&mapping->tree_lock);
795+
return VM_FAULT_NOPAGE;
796+
}
793797
radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
794-
put_unlocked_mapping_entry(mapping, index, entry);
795-
out:
798+
entry = lock_slot(mapping, slot);
796799
spin_unlock_irq(&mapping->tree_lock);
800+
/*
801+
* If we race with somebody updating the PTE and finish_mkwrite_fault()
802+
* fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
803+
* the fault in either case.
804+
*/
805+
finish_mkwrite_fault(vmf);
806+
put_locked_mapping_entry(mapping, index, entry);
797807
return VM_FAULT_NOPAGE;
798808
}
799809
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

mm/memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2315,7 +2315,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
23152315
pte_unmap_unlock(vmf->pte, vmf->ptl);
23162316
vmf->flags |= FAULT_FLAG_MKWRITE;
23172317
ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
2318-
if (ret & VM_FAULT_ERROR)
2318+
if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
23192319
return ret;
23202320
return finish_mkwrite_fault(vmf);
23212321
}

0 commit comments

Comments (0)