
Commit 3506659

Author: Matthew Wilcox (Oracle)
mm: Add unmap_mapping_folio()
Convert both callers of unmap_mapping_page() to call unmap_mapping_folio() instead. Also move zap_details from linux/mm.h to mm/memory.c.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
1 parent efe99bb commit 3506659
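
The conversion at the call sites is mechanical. As a minimal illustrative sketch (not part of the commit, mirroring the mm/truncate.c hunks below), a caller that only holds a locked struct page uses the existing page_folio() helper to obtain the containing folio:

	/* Before this commit: pass the locked page directly. */
	if (page_mapped(page))
		unmap_mapping_page(page);

	/* After: pass the folio. page_folio() returns the folio containing
	 * the page for callers that still work on struct page (as in
	 * invalidate_inode_pages2_range()), while callers that already hold
	 * a folio (e.g. truncate_cleanup_folio()) pass it directly. */
	if (page_mapped(page))
		unmap_mapping_folio(page_folio(page));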

4 files changed: 40 additions, 41 deletions


include/linux/mm.h

Lines changed: 0 additions & 24 deletions
@@ -1825,28 +1825,6 @@ static inline bool can_do_mlock(void) { return false; }
 extern int user_shm_lock(size_t, struct ucounts *);
 extern void user_shm_unlock(size_t, struct ucounts *);
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-	struct address_space *zap_mapping;	/* Check page->mapping if set */
-	struct page *single_page;		/* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mapping when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
-	if (!details || !page)
-		return false;
-
-	return details->zap_mapping &&
-		(details->zap_mapping != page_rmapping(page));
-}
-
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1892,7 +1870,6 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
-void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1913,7 +1890,6 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
 	BUG();
 	return -EFAULT;
 }
-static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,

mm/internal.h

Lines changed: 3 additions & 1 deletion
@@ -74,6 +74,7 @@ static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 }
 
+struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
@@ -388,6 +389,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +493,8 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 	}
 	return fpin;
 }
-
 #else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)

mm/memory.c

Lines changed: 35 additions & 14 deletions
@@ -1304,6 +1304,28 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 	return ret;
 }
 
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+	struct address_space *zap_mapping;	/* Check page->mapping if set */
+	struct folio *single_folio;	/* Locked folio to be unmapped */
+};
+
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+	if (!details || !page)
+		return false;
+
+	return details->zap_mapping &&
+		(details->zap_mapping != page_rmapping(page));
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
@@ -1443,8 +1465,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		else if (zap_huge_pmd(tlb, vma, pmd, addr))
 			goto next;
 		/* fall through */
-	} else if (details && details->single_page &&
-		   PageTransCompound(details->single_page) &&
+	} else if (details && details->single_folio &&
+		   folio_test_pmd_mappable(details->single_folio) &&
 		   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
 		spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
 		/*
@@ -3332,31 +3354,30 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 }
 
 /**
- * unmap_mapping_page() - Unmap single page from processes.
- * @page: The locked page to be unmapped.
+ * unmap_mapping_folio() - Unmap single folio from processes.
+ * @folio: The locked folio to be unmapped.
  *
- * Unmap this page from any userspace process which still has it mmaped.
+ * Unmap this folio from any userspace process which still has it mmaped.
  * Typically, for efficiency, the range of nearby pages has already been
  * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
- * truncation or invalidation holds the lock on a page, it may find that
- * the page has been remapped again: and then uses unmap_mapping_page()
+ * truncation or invalidation holds the lock on a folio, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_folio()
  * to unmap it finally.
  */
-void unmap_mapping_page(struct page *page)
+void unmap_mapping_folio(struct folio *folio)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	struct zap_details details = { };
 	pgoff_t first_index;
 	pgoff_t last_index;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON(!folio_test_locked(folio));
 
-	first_index = page->index;
-	last_index = page->index + thp_nr_pages(page) - 1;
+	first_index = folio->index;
+	last_index = folio->index + folio_nr_pages(folio) - 1;
 
 	details.zap_mapping = mapping;
-	details.single_page = page;
+	details.single_folio = folio;
 
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
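
For context, and not part of this diff: zap_details is consumed by zap_pte_range(), which is why the structure can now be private to mm/memory.c. A simplified sketch of that existing consumer, checking each present PTE's page against the mapping before zapping it:

	struct page *page = vm_normal_page(vma, addr, ptent);
	/* Skip pages whose mapping differs from details->zap_mapping, i.e.
	 * keep private COW copies while zapping the file's own pages. */
	if (unlikely(zap_skip_check_mapping(details, page)))
		continue;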

mm/truncate.c

Lines changed: 2 additions & 2 deletions
@@ -180,7 +180,7 @@ void do_invalidatepage(struct page *page, unsigned int offset,
 static void truncate_cleanup_folio(struct folio *folio)
 {
 	if (folio_mapped(folio))
-		unmap_mapping_page(&folio->page);
+		unmap_mapping_folio(folio);
 
 	if (folio_has_private(folio))
 		do_invalidatepage(&folio->page, 0, folio_size(folio));
@@ -670,7 +670,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		wait_on_page_writeback(page);
 
 		if (page_mapped(page))
-			unmap_mapping_page(page);
+			unmap_mapping_folio(page_folio(page));
 		BUG_ON(page_mapped(page));
 
 		ret2 = do_launder_page(mapping, page);
