Skip to content

Commit c7da82b

Browse files
djbw authored and torvalds committed
mm: replace pmd_write with pmd_access_permitted in fault + gup paths
The 'access_permitted' helper is used in the gup-fast path and goes beyond the simple _PAGE_RW check to also: - validate that the mapping is writable from a protection keys standpoint - validate that the pte has _PAGE_USER set, since all fault paths where pmd_write is checked must be referencing user-memory. Link: http://lkml.kernel.org/r/151043111049.2842.15241454964150083466.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: "Jérôme Glisse" <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent e7fe7b5 commit c7da82b

File tree

5 files changed

+8
-7
lines changed

5 files changed

+8
-7
lines changed

arch/sparc/mm/gup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
7575
if (!(pmd_val(pmd) & _PAGE_VALID))
7676
return 0;
7777

78-
if (write && !pmd_write(pmd))
78+
if (!pmd_access_permitted(pmd, write))
7979
return 0;
8080

8181
refs = 0;

fs/dax.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -627,7 +627,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
627627

628628
if (pfn != pmd_pfn(*pmdp))
629629
goto unlock_pmd;
630-
if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
630+
if (!pmd_dirty(*pmdp)
631+
&& !pmd_access_permitted(*pmdp, WRITE))
631632
goto unlock_pmd;
632633

633634
flush_cache_page(vma, address, pfn);

mm/hmm.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -391,11 +391,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
391391
if (pmd_protnone(pmd))
392392
return hmm_vma_walk_clear(start, end, walk);
393393

394-
if (write_fault && !pmd_write(pmd))
394+
if (!pmd_access_permitted(pmd, write_fault))
395395
return hmm_vma_walk_clear(start, end, walk);
396396

397397
pfn = pmd_pfn(pmd) + pte_index(addr);
398-
flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
398+
flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
399399
for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
400400
pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
401401
return 0;

mm/huge_memory.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -877,7 +877,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
877877
*/
878878
WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
879879

880-
if (flags & FOLL_WRITE && !pmd_write(*pmd))
880+
if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
881881
return NULL;
882882

883883
if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1393,7 +1393,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
13931393
*/
13941394
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
13951395
{
1396-
return pmd_write(pmd) ||
1396+
return pmd_access_permitted(pmd, WRITE) ||
13971397
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
13981398
}
13991399

mm/memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4046,7 +4046,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
40464046
if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
40474047
return do_huge_pmd_numa_page(&vmf, orig_pmd);
40484048

4049-
if (dirty && !pmd_write(orig_pmd)) {
4049+
if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
40504050
ret = wp_huge_pmd(&vmf, orig_pmd);
40514051
if (!(ret & VM_FAULT_FALLBACK))
40524052
return ret;

0 commit comments

Comments (0)