Skip to content

Commit e121e41

Browse files
venkatesh.pallipadi@intel.com authored and H. Peter Anvin committed
x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3
Impact: New currently unused interface. Add a generic interface to follow pfn in a pfnmap vma range. This is used by one of the subsequent x86 PAT related patch to keep track of memory types for vma regions across vma copy and free. Signed-off-by: Venkatesh Pallipadi <[email protected]> Signed-off-by: Suresh Siddha <[email protected]> Signed-off-by: H. Peter Anvin <[email protected]>
1 parent 3c8bb73 commit e121e41

File tree

2 files changed

+46
-0
lines changed

2 files changed

+46
-0
lines changed

include/linux/mm.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1223,6 +1223,9 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
12231223
#define FOLL_GET 0x04 /* do get_page on page */
12241224
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
12251225

/* Look up the pte for @address in a VM_PFNMAP @vma; see mm/memory.c. */
int follow_pfnmap_pte(struct vm_area_struct *vma,
				unsigned long address, pte_t *ret_ptep);
12261229
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
12271230
void *data);
12281231
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,

mm/memory.c

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1111,6 +1111,49 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
11111111
return page;
11121112
}
11131113

1114+
int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
1115+
pte_t *ret_ptep)
1116+
{
1117+
pgd_t *pgd;
1118+
pud_t *pud;
1119+
pmd_t *pmd;
1120+
pte_t *ptep, pte;
1121+
spinlock_t *ptl;
1122+
struct page *page;
1123+
struct mm_struct *mm = vma->vm_mm;
1124+
1125+
if (!is_pfn_mapping(vma))
1126+
goto err;
1127+
1128+
page = NULL;
1129+
pgd = pgd_offset(mm, address);
1130+
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1131+
goto err;
1132+
1133+
pud = pud_offset(pgd, address);
1134+
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
1135+
goto err;
1136+
1137+
pmd = pmd_offset(pud, address);
1138+
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
1139+
goto err;
1140+
1141+
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
1142+
1143+
pte = *ptep;
1144+
if (!pte_present(pte))
1145+
goto err_unlock;
1146+
1147+
*ret_ptep = pte;
1148+
pte_unmap_unlock(ptep, ptl);
1149+
return 0;
1150+
1151+
err_unlock:
1152+
pte_unmap_unlock(ptep, ptl);
1153+
err:
1154+
return -EINVAL;
1155+
}
1156+
11141157
/* Can we do the FOLL_ANON optimization? */
11151158
static inline int use_zero_page(struct vm_area_struct *vma)
11161159
{

0 commit comments

Comments
 (0)