
Commit 977fbdc

Matthew Wilcox authored and torvalds committed
mm: add unmap_mapping_pages()
Several users of unmap_mapping_range() would prefer to express their range in pages rather than bytes. Unfortunately, on a 32-bit kernel, you have to remember to cast your page number to a 64-bit type before shifting it, and four places in the current tree didn't remember to do that. That's a sign of a bad interface.

Conveniently, unmap_mapping_range() actually converts from bytes into pages, so hoist the guts of unmap_mapping_range() into a new function unmap_mapping_pages() and convert the callers which want to use pages.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox <[email protected]>
Reported-by: "zhangyi (F)" <[email protected]>
Reviewed-by: Ross Zwisler <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
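For illustration only (not part of the commit): a minimal, self-contained sketch of the 32-bit shift hazard the commit message describes. The PAGE_SHIFT value, the index value, and the printed offsets are assumptions chosen for the example.

/*
 * Illustrative sketch, not from the commit: on a 32-bit kernel a page
 * index (pgoff_t, i.e. unsigned long) is 32 bits wide, so shifting it
 * by PAGE_SHIFT without first widening it to 64 bits silently drops
 * the high bits for offsets beyond 4GB.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4KB pages */

int main(void)
{
	uint32_t index = 0x00200000;	/* hypothetical page index (an 8GB offset) */

	/* Buggy pattern: the shift wraps in 32-bit arithmetic. */
	uint32_t wrong = index << PAGE_SHIFT;

	/* Correct pattern: cast to a 64-bit type before shifting. */
	uint64_t right = (uint64_t)index << PAGE_SHIFT;

	printf("32-bit shift: 0x%08x\n", wrong);			/* 0x00000000 */
	printf("64-bit shift: 0x%016llx\n", (unsigned long long)right);	/* 0x200000000 */
	return 0;
}

With the new interface a caller passes the page index and page count directly, so the callers never shift (and never need to remember the cast) at all.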
1 parent a365ac0 commit 977fbdc

File tree

  fs/dax.c
  include/linux/mm.h
  mm/khugepaged.c
  mm/memory.c
  mm/nommu.c
  mm/truncate.c

6 files changed: +61 -60 lines changed

fs/dax.c

Lines changed: 6 additions & 13 deletions
@@ -44,6 +44,7 @@
 
 /* The 'colour' (ie low bits) within a PMD of a page offset. */
 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
@@ -375,8 +376,8 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 		 * unmapped.
 		 */
 		if (pmd_downgrade && dax_is_zero_entry(entry))
-			unmap_mapping_range(mapping,
-				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 
 		err = radix_tree_preload(
 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
-			unmap_mapping_range(mapping,
-					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
-					PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 		else /* pte entry */
-			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
-
 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
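As an aside (not part of the commit): a self-contained sketch of the PG_PMD_COLOUR / PG_PMD_NR arithmetic the new fs/dax.c callers rely on, assuming 4KB pages and 2MB PMDs; the index value is made up for the example.

/*
 * Illustrative sketch, not from the commit: with 4KB pages and 2MB PMDs,
 * PG_PMD_NR is the number of pages per PMD (512) and PG_PMD_COLOUR its
 * low-bit mask (511).  Clearing the colour bits rounds a page index down
 * to the start of its PMD-sized block.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)			/* assumed 2MB PMDs */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)	/* 511 */
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)	/* 512 */

int main(void)
{
	unsigned long index = 1000;	/* hypothetical page index */
	unsigned long start = index & ~PG_PMD_COLOUR;	/* 512: PMD-aligned */

	/* The fs/dax.c callers then unmap the whole PMD-sized block: */
	printf("unmap_mapping_pages(mapping, %lu, %lu, false)\n",
	       start, PG_PMD_NR);
	return 0;
}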

include/linux/mm.h

Lines changed: 16 additions & 10 deletions
@@ -1312,8 +1312,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 			unsigned long *start, unsigned long *end,
 			pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1324,12 +1322,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen)
-{
-	unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
 extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1344,6 +1336,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			unsigned long address, unsigned int fault_flags,
 			bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline int handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
@@ -1360,10 +1356,20 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 	BUG();
 	return -EFAULT;
 }
+static inline void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows) { }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
-		unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen)
+{
+	unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,

mm/khugepaged.c

Lines changed: 1 addition & 2 deletions
@@ -1399,8 +1399,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		}
 
 		if (page_mapped(page))
-			unmap_mapping_range(mapping, index << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, index, 1, false);
 
 		spin_lock_irq(&mapping->tree_lock);
 

mm/memory.c

Lines changed: 31 additions & 12 deletions
@@ -2798,9 +2798,38 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 	}
 }
 
+/**
+ * unmap_mapping_pages() - Unmap pages from processes.
+ * @mapping: The address space containing pages to be unmapped.
+ * @start: Index of first page to be unmapped.
+ * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
+ * @even_cows: Whether to unmap even private COWed pages.
+ *
+ * Unmap the pages in this address space from any userspace process which
+ * has them mmaped.  Generally, you want to remove COWed pages as well when
+ * a file is being truncated, but not when invalidating pages from the page
+ * cache.
+ */
+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
+		pgoff_t nr, bool even_cows)
+{
+	struct zap_details details = { };
+
+	details.check_mapping = even_cows ? NULL : mapping;
+	details.first_index = start;
+	details.last_index = start + nr - 1;
+	if (details.last_index < details.first_index)
+		details.last_index = ULONG_MAX;
+
+	i_mmap_lock_write(mapping);
+	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+		unmap_mapping_range_tree(&mapping->i_mmap, &details);
+	i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
- * address_space corresponding to the specified page range in the underlying
+ * address_space corresponding to the specified byte range in the underlying
  * file.
  *
  * @mapping: the address space containing mmaps to be unmapped.
@@ -2818,7 +2847,6 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-	struct zap_details details = { };
 	pgoff_t hba = holebegin >> PAGE_SHIFT;
 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
@@ -2830,16 +2858,7 @@ void unmap_mapping_range(struct address_space *mapping,
 		hlen = ULONG_MAX - hba + 1;
 	}
 
-	details.check_mapping = even_cows ? NULL : mapping;
-	details.first_index = hba;
-	details.last_index = hba + hlen - 1;
-	if (details.last_index < details.first_index)
-		details.last_index = ULONG_MAX;
-
-	i_mmap_lock_write(mapping);
-	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
-		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	i_mmap_unlock_write(mapping);
+	unmap_mapping_pages(mapping, hba, hlen, even_cows);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
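A side note (not from the commit) on the "@nr: ... 0 to unmap to end of file" wording in the kernel-doc above: because pgoff_t is unsigned, passing nr == 0 makes start + nr - 1 fall below start, so last_index is clamped to ULONG_MAX, i.e. everything from start onwards. A minimal sketch, with made-up values:

/* Illustrative sketch, not from the commit: the nr == 0 clamp. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long start = 100, nr = 0;	/* hypothetical values */
	unsigned long first_index = start;
	unsigned long last_index = start + nr - 1;	/* nr == 0 gives start - 1 */

	if (last_index < first_index)
		last_index = ULONG_MAX;	/* i.e. unmap to end of file */

	printf("unmap indices %lu..%lu\n", first_index, last_index);
	return 0;
}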

mm/nommu.c

Lines changed: 0 additions & 7 deletions
@@ -1788,13 +1788,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
-void unmap_mapping_range(struct address_space *mapping,
-			 loff_t const holebegin, loff_t const holelen,
-			 int even_cows)
-{
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
 int filemap_fault(struct vm_fault *vmf)
 {
 	BUG();

mm/truncate.c

Lines changed: 7 additions & 16 deletions
@@ -179,12 +179,8 @@ static void
 truncate_cleanup_page(struct address_space *mapping, struct page *page)
 {
 	if (page_mapped(page)) {
-		loff_t holelen;
-
-		holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
-		unmap_mapping_range(mapping,
-				   (loff_t)page->index << PAGE_SHIFT,
-				   holelen, 0);
+		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
+		unmap_mapping_pages(mapping, page->index, nr, false);
 	}
 
 	if (page_has_private(page))
@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					/*
 					 * Zap the rest of the file in one hit.
 					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   (loff_t)(1 + end - index)
-							 << PAGE_SHIFT,
-							 0);
+					unmap_mapping_pages(mapping, index,
+						(1 + end - index), false);
 					did_range_unmap = 1;
 				} else {
 					/*
 					 * Just zap this page
 					 */
-					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_SHIFT,
-					   PAGE_SIZE, 0);
+					unmap_mapping_pages(mapping, index,
+								1, false);
 				}
 			}
 			BUG_ON(page_mapped(page));
@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	 * get remapped later.
 	 */
 	if (dax_mapping(mapping)) {
-		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
-				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+		unmap_mapping_pages(mapping, start, end - start + 1, false);
 	}
 out:
 	cleancache_invalidate_inode(mapping);
