|
35 | 35 | #include <linux/hugetlb.h>
|
36 | 36 | #include <linux/hugetlb_cgroup.h>
|
37 | 37 | #include <linux/gfp.h>
|
| 38 | +#include <linux/balloon_compaction.h> |
38 | 39 |
|
39 | 40 | #include <asm/tlbflush.h>
|
40 | 41 |
|
@@ -79,7 +80,10 @@ void putback_lru_pages(struct list_head *l)
|
79 | 80 | list_del(&page->lru);
|
80 | 81 | dec_zone_page_state(page, NR_ISOLATED_ANON +
|
81 | 82 | page_is_file_cache(page));
|
82 |
| - putback_lru_page(page); |
| 83 | + if (unlikely(balloon_page_movable(page))) |
| 84 | + balloon_page_putback(page); |
| 85 | + else |
| 86 | + putback_lru_page(page); |
83 | 87 | }
|
84 | 88 | }
|
85 | 89 |
|
@@ -768,6 +772,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
|
768 | 772 | }
|
769 | 773 | }
|
770 | 774 |
|
| 775 | + if (unlikely(balloon_page_movable(page))) { |
| 776 | + /* |
| 777 | + * A ballooned page does not need any special attention from |
| 778 | + * physical to virtual reverse mapping procedures. |
| 779 | + * Skip any attempt to unmap PTEs or to remap swap cache, |
| 780 | + * in order to avoid burning cycles at rmap level, and perform |
| 781 | + * the page migration right away (protected by page lock). |
| 782 | + */ |
| 783 | + rc = balloon_page_migrate(newpage, page, mode); |
| 784 | + goto uncharge; |
| 785 | + } |
| 786 | + |
771 | 787 | /*
|
772 | 788 | * Corner case handling:
|
773 | 789 | * 1. When a new swap-cache page is read into, it is added to the LRU
|
@@ -804,7 +820,9 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
|
804 | 820 | put_anon_vma(anon_vma);
|
805 | 821 |
|
806 | 822 | uncharge:
|
807 |
| - mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS); |
| 823 | + mem_cgroup_end_migration(mem, page, newpage, |
| 824 | + (rc == MIGRATEPAGE_SUCCESS || |
| 825 | + rc == MIGRATEPAGE_BALLOON_SUCCESS)); |
808 | 826 | unlock:
|
809 | 827 | unlock_page(page);
|
810 | 828 | out:
|
@@ -836,6 +854,18 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
|
836 | 854 | goto out;
|
837 | 855 |
|
838 | 856 | rc = __unmap_and_move(page, newpage, force, offlining, mode);
|
| 857 | + |
| 858 | + if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { |
| 859 | + /* |
| 860 | + * A ballooned page has been migrated already. |
| 861 | + * Now, it's the time to wrap-up counters, |
| 862 | + * hand the page back to Buddy and return. |
| 863 | + */ |
| 864 | + dec_zone_page_state(page, NR_ISOLATED_ANON + |
| 865 | + page_is_file_cache(page)); |
| 866 | + balloon_page_free(page); |
| 867 | + return MIGRATEPAGE_SUCCESS; |
| 868 | + } |
839 | 869 | out:
|
840 | 870 | if (rc != -EAGAIN) {
|
841 | 871 | /*
|
|
0 commit comments