
Commit 0efadf4

Yisheng Xie authored and torvalds committed
mm/hotplug: enable memory hotplug for non-lru movable pages
We had considered all of the non-lru pages as unmovable before commit bda807d ("mm: migrate: support non-lru movable page migration"). But now some non-lru pages, such as zsmalloc and virtio-balloon pages, have also become movable, so we can offline such blocks by using non-lru page migration.

This patch straightforwardly adds the non-lru migration code, which means adding non-lru related code to the functions which scan over pfn ranges, collect the pages to be migrated, and isolate them before migration.

Signed-off-by: Yisheng Xie <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Hanjun Guo <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Reza Arbab <[email protected]>
Cc: Taku Izumi <[email protected]>
Cc: Vitaly Kuznetsov <[email protected]>
Cc: Xishi Qiu <[email protected]>
Cc: Yisheng Xie <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 85fbe5d commit 0efadf4
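For context (not part of this commit): the non-lru movable page interface this patch builds on, introduced by commit bda807d, lets a driver mark its pages movable by registering migration hooks in its address_space_operations and tagging each page with __SetPageMovable(). The sketch below is illustrative only; the mydrv_* names are hypothetical, while the hook signatures follow that interface as of this kernel series.

/*
 * Illustrative sketch only: how a driver makes its pages non-lru movable so
 * that the hotplug path in this commit can isolate and migrate them.  The
 * mydrv_* names are hypothetical; the hooks come from commit bda807d
 * ("mm: migrate: support non-lru movable page migration").
 */
#include <linux/compaction.h>	/* __SetPageMovable() */
#include <linux/fs.h>		/* struct address_space_operations */
#include <linux/migrate.h>	/* MIGRATEPAGE_SUCCESS, enum migrate_mode */
#include <linux/pagemap.h>	/* lock_page()/unlock_page() */

static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Detach @page from the driver's internal lists; true means isolated. */
	return true;
}

static int mydrv_migratepage(struct address_space *mapping,
			     struct page *newpage, struct page *page,
			     enum migrate_mode mode)
{
	/* Copy contents and driver state from @page to @newpage. */
	return MIGRATEPAGE_SUCCESS;
}

static void mydrv_putback_page(struct page *page)
{
	/* Re-insert @page into the driver's lists if migration was aborted. */
}

static const struct address_space_operations mydrv_aops = {
	.isolate_page	= mydrv_isolate_page,
	.migratepage	= mydrv_migratepage,
	.putback_page	= mydrv_putback_page,
};

/* @mapping->a_ops must point at mydrv_aops; called with the page locked. */
static void mydrv_mark_movable(struct page *page, struct address_space *mapping)
{
	lock_page(page);
	__SetPageMovable(page, mapping);
	unlock_page(page);
}

Once a page is tagged this way, the new else branch in do_migrate_range() below can isolate it with isolate_movable_page() instead of isolate_lru_page().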

2 files changed: 23 additions, 13 deletions


mm/memory_hotplug.c

Lines changed: 17 additions & 11 deletions
@@ -1531,10 +1531,10 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 }
 
 /*
- * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
- * and hugepages). We scan pfn because it's much easier than scanning over
- * linked list. This function returns the pfn of the first found movable
- * page if it's found, otherwise 0.
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
+ * non-lru movable pages and hugepages). We scan pfn because it's much
+ * easier than scanning over linked list. This function returns the pfn
+ * of the first found movable page if it's found, otherwise 0.
  */
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
@@ -1545,6 +1545,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 		page = pfn_to_page(pfn);
 		if (PageLRU(page))
 			return pfn;
+		if (__PageMovable(page))
+			return pfn;
 		if (PageHuge(page)) {
 			if (page_huge_active(page))
 				return pfn;
@@ -1621,21 +1623,25 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		if (!get_page_unless_zero(page))
 			continue;
 		/*
-		 * We can skip free pages. And we can only deal with pages on
-		 * LRU.
+		 * We can skip free pages. And we can deal with pages on
+		 * LRU and non-lru movable pages.
 		 */
-		ret = isolate_lru_page(page);
+		if (PageLRU(page))
+			ret = isolate_lru_page(page);
+		else
+			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
 		if (!ret) { /* Success */
 			put_page(page);
 			list_add_tail(&page->lru, &source);
 			move_pages--;
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-					    page_is_file_cache(page));
+			if (!__PageMovable(page))
+				inc_node_page_state(page, NR_ISOLATED_ANON +
+						page_is_file_cache(page));
 
 		} else {
 #ifdef CONFIG_DEBUG_VM
-			pr_alert("removing pfn %lx from LRU failed\n", pfn);
-			dump_page(page, "failed to remove from LRU");
+			pr_alert("failed to isolate pfn %lx\n", pfn);
+			dump_page(page, "isolation failed");
 #endif
 			put_page(page);
 			/* Because we don't have big zone->lock. we should
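For orientation, the "source" list assembled above is consumed a little further down in do_migrate_range() (unchanged by this patch). Roughly, it looks like the sketch below: migrate_pages() handles both isolated LRU pages and isolated non-lru movable pages, and putback_movable_pages() returns both kinds on failure. The allocator callback name here is illustrative, standing in for the nearest-node allocator memory hotplug actually uses.

/*
 * Rough sketch of the step that follows the isolation loop above (not part
 * of this diff).  offline_new_page() is an illustrative stand-in for the
 * real replacement-page allocator used by memory offlining.
 */
#include <linux/gfp.h>
#include <linux/migrate.h>

static struct page *offline_new_page(struct page *page, unsigned long private,
				     int **result)
{
	/* Allocate a replacement page outside the range being offlined. */
	return alloc_page(GFP_HIGHUSER_MOVABLE);	/* illustrative policy */
}

static void migrate_isolated_pages(struct list_head *source)
{
	int ret;

	ret = migrate_pages(source, offline_new_page, NULL, 0,
			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
	if (ret)
		putback_movable_pages(source);	/* LRU and movable pages alike */
}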

mm/page_alloc.c

Lines changed: 6 additions & 2 deletions
@@ -7227,8 +7227,9 @@ void *__init alloc_large_system_hash(const char *tablename,
  * If @count is not zero, it is okay to include less @count unmovable pages
  *
  * PageLRU check without isolation or lru_lock could race so that
- * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
- * expect this function should be exact.
+ * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
+ * check without lock_page also may miss some movable non-lru pages at
+ * race condition. So you can't expect this function should be exact.
  */
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			 bool skip_hwpoisoned_pages)
@@ -7284,6 +7285,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		if (skip_hwpoisoned_pages && PageHWPoison(page))
 			continue;
 
+		if (__PageMovable(page))
+			continue;
+
 		if (!PageLRU(page))
 			found++;
 		/*
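For reference, the __PageMovable() test that the new checks in both files rely on is, paraphrasing include/linux/page-flags.h around this series, a bit test on page->mapping. Because has_unmovable_pages() reads it without lock_page, a page being registered or deregistered by its driver concurrently can be misclassified, which is the race the updated comment warns about.

/*
 * Paraphrase of __PageMovable() as of this series (see
 * include/linux/page-flags.h): non-lru movable pages tag the low bits of
 * page->mapping with PAGE_MAPPING_MOVABLE via __SetPageMovable(), so the
 * test is a plain read of page->mapping and is racy without the page lock.
 */
static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}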
