Skip to content

Commit 800d8c6

Browse files
kiryltorvalds
authored and committed
shmem: add huge pages support
Here's a basic implementation of huge pages support for shmem/tmpfs. It's all pretty straightforward: - shmem_getpage() allocates a huge page if it can and tries to insert it into the radix tree with shmem_add_to_page_cache(); - shmem_add_to_page_cache() puts the page onto the radix-tree if there's space for it; - shmem_undo_range() removes huge pages if they are fully within the range. A partial truncate of a huge page zeroes out that part of the THP. This has a visible effect on fallocate(FALLOC_FL_PUNCH_HOLE) behaviour. As we don't really create a hole in this case, lseek(SEEK_HOLE) may have inconsistent results depending on what pages happened to be allocated. - no need to change shmem_fault: core-mm will map a compound page as huge if the VMA is suitable; Link: http://lkml.kernel.org/r/1466021202-61880-30-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent c01d5b3 commit 800d8c6

File tree

9 files changed

+331
-70
lines changed

9 files changed

+331
-70
lines changed

include/linux/huge_mm.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -156,6 +156,8 @@ void put_huge_zero_page(void);
156156

157157
#define transparent_hugepage_enabled(__vma) 0
158158

159+
static inline void prep_transhuge_page(struct page *page) {}
160+
159161
#define transparent_hugepage_flags 0UL
160162
static inline int
161163
split_huge_page_to_list(struct page *page, struct list_head *list)

include/linux/shmem_fs.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,9 @@ static inline struct page *shmem_read_mapping_page(
7171
mapping_gfp_mask(mapping));
7272
}
7373

74+
extern bool shmem_charge(struct inode *inode, long pages);
75+
extern void shmem_uncharge(struct inode *inode, long pages);
76+
7477
#ifdef CONFIG_TMPFS
7578

7679
extern int shmem_add_seals(struct file *file, unsigned int seals);

mm/filemap.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -219,8 +219,13 @@ void __delete_from_page_cache(struct page *page, void *shadow)
219219
/* hugetlb pages do not participate in page cache accounting. */
220220
if (!PageHuge(page))
221221
__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
222-
if (PageSwapBacked(page))
222+
if (PageSwapBacked(page)) {
223223
__mod_zone_page_state(page_zone(page), NR_SHMEM, -nr);
224+
if (PageTransHuge(page))
225+
__dec_zone_page_state(page, NR_SHMEM_THPS);
226+
} else {
227+
VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
228+
}
224229

225230
/*
226231
* At this point page must be either written or cleaned by truncate.

mm/huge_memory.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3316,6 +3316,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
33163316
if (head[i].index >= end) {
33173317
__ClearPageDirty(head + i);
33183318
__delete_from_page_cache(head + i, NULL);
3319+
if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
3320+
shmem_uncharge(head->mapping->host, 1);
33193321
put_page(head + i);
33203322
}
33213323
}

mm/memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1142,7 +1142,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
11421142
* unmap shared but keep private pages.
11431143
*/
11441144
if (details->check_mapping &&
1145-
details->check_mapping != page->mapping)
1145+
details->check_mapping != page_rmapping(page))
11461146
continue;
11471147
}
11481148
ptent = ptep_get_and_clear_full(mm, addr, pte,

mm/mempolicy.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -531,7 +531,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
531531
nid = page_to_nid(page);
532532
if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
533533
continue;
534-
if (PageTransCompound(page) && PageAnon(page)) {
534+
if (PageTransCompound(page)) {
535535
get_page(page);
536536
pte_unmap_unlock(pte, ptl);
537537
lock_page(page);

mm/page-writeback.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2563,6 +2563,7 @@ int set_page_dirty(struct page *page)
25632563
{
25642564
struct address_space *mapping = page_mapping(page);
25652565

2566+
page = compound_head(page);
25662567
if (likely(mapping)) {
25672568
int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
25682569
/*

0 commit comments

Comments
 (0)