
Commit a145dd4
Author: Linus Torvalds

VM: add "vm_insert_page()" function
This is what a lot of drivers will actually want to use to insert individual pages into a user VMA. It doesn't have the old PageReserved restrictions of remap_pfn_range(), and it doesn't complain about partial remappings.

The page you insert needs to be a nice clean kernel allocation, so you can't insert arbitrary page mappings with this, but that's not what people want.

Signed-off-by: Linus Torvalds <[email protected]>
1 parent f8e9884 commit a145dd4
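
For context, the expected caller is a driver's mmap() file operation, which walks its own kernel-allocated pages and inserts them one by one. Below is a minimal sketch, not part of this commit: mydrv_mmap, mydrv_pages[] and MYDRV_NPAGES are hypothetical names, and the pages are assumed to have been allocated earlier with plain alloc_page(GFP_KERNEL).

    /*
     * Hypothetical driver mmap handler illustrating vm_insert_page().
     * The names below are illustrative, not taken from the kernel tree.
     */
    #include <linux/fs.h>
    #include <linux/mm.h>

    #define MYDRV_NPAGES 16

    /* Filled elsewhere with alloc_page(GFP_KERNEL) results. */
    static struct page *mydrv_pages[MYDRV_NPAGES];

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	unsigned long uaddr = vma->vm_start;
    	int i, ret;

    	for (i = 0; i < MYDRV_NPAGES && uaddr < vma->vm_end; i++) {
    		/* No PageReserved games and no pgprot argument:
    		 * the VMA's own vm_page_prot is used. */
    		ret = vm_insert_page(vma, uaddr, mydrv_pages[i]);
    		if (ret < 0)
    			return ret;
    		uaddr += PAGE_SIZE;
    	}
    	return 0;
    }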

2 files changed: 35 additions, 2 deletions

include/linux/mm.h
1 addition, 0 deletions

@@ -956,6 +956,7 @@ struct page *vmalloc_to_page(void *addr);
 unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 		unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
 		unsigned int foll_flags);

mm/memory.c
34 additions, 2 deletions

@@ -1172,7 +1172,7 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
 	spinlock_t *ptl;
 
 	retval = -EINVAL;
-	if (PageAnon(page) || !PageReserved(page))
+	if (PageAnon(page))
 		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
@@ -1196,6 +1196,35 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
 	return retval;
 }
 
+/*
+ * This allows drivers to insert individual pages they've allocated
+ * into a user vma.
+ *
+ * The page has to be a nice clean _individual_ kernel allocation.
+ * If you allocate a compound page, you need to have marked it as
+ * such (__GFP_COMP), or manually just split the page up yourself
+ * (which is mainly an issue of doing "set_page_count(page, 1)" for
+ * each sub-page, and then freeing them one by one when you free
+ * them rather than freeing it as a compound page).
+ *
+ * NOTE! Traditionally this was done with "remap_pfn_range()" which
+ * took an arbitrary page protection parameter. This doesn't allow
+ * that. Your vma protection will have to be set up correctly, which
+ * means that if you want a shared writable mapping, you'd better
+ * ask for a shared writable mapping!
+ *
+ * The page does not need to be reserved.
+ */
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+	if (!page_count(page))
+		return -EINVAL;
+	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(vm_insert_page);
+
 /*
  * Somebody does a pfn remapping that doesn't actually work as a vma.
  *
@@ -1225,8 +1254,11 @@ static int incomplete_pfn_remap(struct vm_area_struct *vma,
 	if (!pfn_valid(pfn))
 		return -EINVAL;
 
-	retval = 0;
 	page = pfn_to_page(pfn);
+	if (!PageReserved(page))
+		return -EINVAL;
+
+	retval = 0;
 	while (start < end) {
 		retval = insert_page(vma->vm_mm, start, page, prot);
 		if (retval < 0)
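
As the new comment stresses, vm_insert_page() takes no pgprot argument, so the protection of the mapping comes entirely from the VMA. Seen from userspace, that means a client of such a driver has to request the mapping it actually wants at mmap() time. A small sketch, assuming a hypothetical /dev/mydrv node whose mmap handler inserts driver-allocated pages as shown earlier:

    /* Userspace side: ask for the shared writable mapping up front,
     * because the driver can no longer override the protection the
     * way remap_pfn_range() could. /dev/mydrv is a hypothetical node. */
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/dev/mydrv", O_RDWR);
    	if (fd < 0)
    		return 1;

    	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    			 MAP_SHARED, fd, 0);
    	if (buf == MAP_FAILED)
    		return 1;

    	buf[0] = 42;		/* write lands in the driver's kernel page */

    	munmap(buf, 4096);
    	close(fd);
    	return 0;
    }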
