Skip to content

Commit bf6bddf

Browse files
aquini authored and torvalds committed
mm: introduce compaction and migration for ballooned pages
Memory fragmentation introduced by ballooning might reduce significantly the number of 2MB contiguous memory blocks that can be used within a guest, thus imposing performance penalties associated with the reduced number of transparent huge pages that could be used by the guest workload. This patch introduces the helper functions as well as the necessary changes to teach compaction and migration bits how to cope with pages which are part of a guest memory balloon, in order to make them movable by memory compaction procedures. Signed-off-by: Rafael Aquini <[email protected]> Acked-by: Mel Gorman <[email protected]> Cc: Rusty Russell <[email protected]> Cc: "Michael S. Tsirkin" <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Andi Kleen <[email protected]> Cc: Konrad Rzeszutek Wilk <[email protected]> Cc: Minchan Kim <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 18468d9 commit bf6bddf

File tree

2 files changed

+51
-4
lines changed

2 files changed

+51
-4
lines changed

mm/compaction.c

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/backing-dev.h>
1515
#include <linux/sysctl.h>
1616
#include <linux/sysfs.h>
17+
#include <linux/balloon_compaction.h>
1718
#include "internal.h"
1819

1920
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -565,9 +566,24 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
565566
goto next_pageblock;
566567
}
567568

568-
/* Check may be lockless but that's ok as we recheck later */
569-
if (!PageLRU(page))
569+
/*
570+
* Check may be lockless but that's ok as we recheck later.
571+
* It's possible to migrate LRU pages and balloon pages
572+
* Skip any other type of page
573+
*/
574+
if (!PageLRU(page)) {
575+
if (unlikely(balloon_page_movable(page))) {
576+
if (locked && balloon_page_isolate(page)) {
577+
/* Successfully isolated */
578+
cc->finished_update_migrate = true;
579+
list_add(&page->lru, migratelist);
580+
cc->nr_migratepages++;
581+
nr_isolated++;
582+
goto check_compact_cluster;
583+
}
584+
}
570585
continue;
586+
}
571587

572588
/*
573589
* PageLRU is set. lru_lock normally excludes isolation
@@ -621,6 +637,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
621637
cc->nr_migratepages++;
622638
nr_isolated++;
623639

640+
check_compact_cluster:
624641
/* Avoid isolating too much */
625642
if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
626643
++low_pfn;

mm/migrate.c

Lines changed: 32 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#include <linux/hugetlb.h>
3636
#include <linux/hugetlb_cgroup.h>
3737
#include <linux/gfp.h>
38+
#include <linux/balloon_compaction.h>
3839

3940
#include <asm/tlbflush.h>
4041

@@ -79,7 +80,10 @@ void putback_lru_pages(struct list_head *l)
7980
list_del(&page->lru);
8081
dec_zone_page_state(page, NR_ISOLATED_ANON +
8182
page_is_file_cache(page));
82-
putback_lru_page(page);
83+
if (unlikely(balloon_page_movable(page)))
84+
balloon_page_putback(page);
85+
else
86+
putback_lru_page(page);
8387
}
8488
}
8589

@@ -768,6 +772,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
768772
}
769773
}
770774

775+
if (unlikely(balloon_page_movable(page))) {
776+
/*
777+
* A ballooned page does not need any special attention from
778+
* physical to virtual reverse mapping procedures.
779+
* Skip any attempt to unmap PTEs or to remap swap cache,
780+
* in order to avoid burning cycles at rmap level, and perform
781+
* the page migration right away (protected by page lock).
782+
*/
783+
rc = balloon_page_migrate(newpage, page, mode);
784+
goto uncharge;
785+
}
786+
771787
/*
772788
* Corner case handling:
773789
* 1. When a new swap-cache page is read into, it is added to the LRU
@@ -804,7 +820,9 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
804820
put_anon_vma(anon_vma);
805821

806822
uncharge:
807-
mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS);
823+
mem_cgroup_end_migration(mem, page, newpage,
824+
(rc == MIGRATEPAGE_SUCCESS ||
825+
rc == MIGRATEPAGE_BALLOON_SUCCESS));
808826
unlock:
809827
unlock_page(page);
810828
out:
@@ -836,6 +854,18 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
836854
goto out;
837855

838856
rc = __unmap_and_move(page, newpage, force, offlining, mode);
857+
858+
if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
859+
/*
860+
* A ballooned page has been migrated already.
861+
* Now, it's the time to wrap-up counters,
862+
* handle the page back to Buddy and return.
863+
*/
864+
dec_zone_page_state(page, NR_ISOLATED_ANON +
865+
page_is_file_cache(page));
866+
balloon_page_free(page);
867+
return MIGRATEPAGE_SUCCESS;
868+
}
839869
out:
840870
if (rc != -EAGAIN) {
841871
/*

0 commit comments

Comments
 (0)