
Commit 579c571

liu-song-6 authored and torvalds committed
khugepaged: rename collapse_shmem() and khugepaged_scan_shmem()
Next patch will add khugepaged support of non-shmem files. This patch
renames these two functions to reflect the new functionality:

    collapse_shmem()        => collapse_file()
    khugepaged_scan_shmem() => khugepaged_scan_file()

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Song Liu <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: William Kucharski <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 60fbf0a commit 579c571

1 file changed

mm/khugepaged.c

Lines changed: 11 additions & 12 deletions
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 }
 
 /**
- * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
+ * collapse_file - collapse small tmpfs/shmem pages into huge one.
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
@@ -1304,10 +1304,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_shmem(struct mm_struct *mm,
-		struct address_space *mapping, pgoff_t start,
+static void collapse_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start,
 		struct page **hpage, int node)
 {
+	struct address_space *mapping = file->f_mapping;
 	gfp_t gfp;
 	struct page *new_page;
 	struct mem_cgroup *memcg;
@@ -1563,11 +1564,11 @@ static void collapse_shmem(struct mm_struct *mm,
 	/* TODO: tracepoints */
 }
 
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-		struct address_space *mapping,
-		pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start, struct page **hpage)
 {
 	struct page *page = NULL;
+	struct address_space *mapping = file->f_mapping;
 	XA_STATE(xas, &mapping->i_pages, start);
 	int present, swap;
 	int node = NUMA_NO_NODE;
@@ -1631,16 +1632,15 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
 			result = SCAN_EXCEED_NONE_PTE;
 		} else {
 			node = khugepaged_find_target_node();
-			collapse_shmem(mm, mapping, start, hpage, node);
+			collapse_file(mm, file, start, hpage, node);
 		}
 	}
 
 	/* TODO: tracepoints */
 }
 #else
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-		struct address_space *mapping,
-		pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start, struct page **hpage)
 {
 	BUILD_BUG();
 }
@@ -1722,8 +1722,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 				file = get_file(vma->vm_file);
 				up_read(&mm->mmap_sem);
 				ret = 1;
-				khugepaged_scan_shmem(mm, file->f_mapping,
-						pgoff, hpage);
+				khugepaged_scan_file(mm, file, pgoff, hpage);
 				fput(file);
 			} else {
 				ret = khugepaged_scan_pmd(mm, vma,
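
Beyond the rename, the diff also changes the calling convention: both helpers now take a struct file * and derive the address_space from file->f_mapping internally, which is what lets the follow-up patch reach non-shmem files through the same entry point. Below is a minimal, userspace-compilable sketch of that pattern only; the struct layouts and function names are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Toy stand-ins for the kernel's struct address_space and struct file. */
struct address_space { const char *name; };
struct file { struct address_space *f_mapping; };

/* Old convention: the callee only ever sees the mapping. */
static void scan_mapping(struct address_space *mapping)
{
	printf("scan mapping %s\n", mapping->name);
}

/* New convention: pass the file and derive the mapping inside, so the
 * callee also has the file itself at hand (e.g. for non-shmem cases). */
static void scan_file(struct file *file)
{
	struct address_space *mapping = file->f_mapping;
	printf("scan mapping %s via file\n", mapping->name);
}

int main(void)
{
	struct address_space as = { "tmpfs-inode" };
	struct file f = { &as };

	scan_mapping(&as);	/* before the patch */
	scan_file(&f);		/* after the patch */
	return 0;
}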
