Commit 02c6de8

minchank authored and torvalds committed
mm: cma: discard clean pages during contiguous allocation instead of migration
Drop clean cache pages instead of migrating them during alloc_contig_range(), to minimise allocation latency by reducing the amount of migration that is necessary. This is useful for CMA because the latency of migration matters more than evicting a background process's working set. In addition, because pages are reclaimed rather than migrated, fewer free pages are needed as migration targets, so the allocation avoids having to reclaim memory just to obtain them, which is a contributory factor to increased latency.

I measured the elapsed time of __alloc_contig_migrate_range(), which migrates 10M in a 40M movable zone on a QEMU machine.

Before - 146ms, After - 7ms

[[email protected]: fix nommu build]
Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Minchan Kim <[email protected]>
Reviewed-by: Mel Gorman <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Acked-by: Michal Nazarewicz <[email protected]>
Cc: Rik van Riel <[email protected]>
Tested-by: Kyungmin Park <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
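For context on the measurement above: the commit itself contains no instrumentation, so the snippet below is only a sketch of how such a before/after timing could be taken, assuming a ktime-based wrapper placed in mm/page_alloc.c next to the function it wraps (which is static there). Only __alloc_contig_migrate_range(start, end) comes from the mm/page_alloc.c hunk further down; the wrapper name, the includes, and the pr_info() output are illustrative assumptions, not part of this patch.

#include <linux/ktime.h>
#include <linux/printk.h>

/*
 * Illustrative only, not part of this commit: time one call to
 * __alloc_contig_migrate_range() and log the elapsed milliseconds,
 * which is the kind of figure quoted in the changelog above.
 */
static int timed_alloc_contig_migrate_range(unsigned long start,
					    unsigned long end)
{
	ktime_t t0 = ktime_get();
	int ret = __alloc_contig_migrate_range(start, end);
	s64 ms = ktime_to_ms(ktime_sub(ktime_get(), t0));

	pr_info("__alloc_contig_migrate_range(%#lx, %#lx): %lld ms (ret=%d)\n",
		start, end, ms, ret);
	return ret;
}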
1 parent 7040030 · commit 02c6de8

File tree: 4 files changed (+52 −17 lines)


include/linux/rmap.h

Lines changed: 11 additions & 10 deletions
@@ -71,6 +71,17 @@ struct anon_vma_chain {
 #endif
 };
 
+enum ttu_flags {
+	TTU_UNMAP = 0,			/* unmap mode */
+	TTU_MIGRATION = 1,		/* migration mode */
+	TTU_MUNLOCK = 2,		/* munlock mode */
+	TTU_ACTION_MASK = 0xff,
+
+	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
+	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
+	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
 #ifdef CONFIG_MMU
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
@@ -164,16 +175,6 @@ int page_referenced(struct page *, int is_locked,
 int page_referenced_one(struct page *, struct vm_area_struct *,
 	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
-enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
-
-	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
-	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
-	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
-};
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);

mm/internal.h

Lines changed: 2 additions & 1 deletion
@@ -356,5 +356,6 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 	unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list);
 #endif	/* __MM_INTERNAL_H */

mm/page_alloc.c

Lines changed: 2 additions & 0 deletions
@@ -5700,6 +5700,8 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 			break;
 		}
 
+		reclaim_clean_pages_from_list(cc.zone, &cc.migratepages);
+
 		ret = migrate_pages(&cc.migratepages,
 				    __alloc_contig_migrate_alloc,
 				    0, false, MIGRATE_SYNC);

mm/vmscan.c

Lines changed: 37 additions & 6 deletions
@@ -674,8 +674,10 @@ static enum page_references page_check_references(struct page *page,
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
+				      enum ttu_flags ttu_flags,
 				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_writeback)
+				      unsigned long *ret_nr_writeback,
+				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -689,10 +691,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 	mem_cgroup_uncharge_start();
 	while (!list_empty(page_list)) {
-		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
+		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 
 		cond_resched();
 
@@ -758,7 +760,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			wait_on_page_writeback(page);
 		}
 
-		references = page_check_references(page, sc);
+		if (!force_reclaim)
+			references = page_check_references(page, sc);
+
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -788,7 +792,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, TTU_UNMAP)) {
+			switch (try_to_unmap(page, ttu_flags)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -960,6 +964,33 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	return nr_reclaimed;
 }
 
+unsigned long reclaim_clean_pages_from_list(struct zone *zone,
+					    struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_unmap = 1,
+	};
+	unsigned long ret, dummy1, dummy2;
+	struct page *page, *next;
+	LIST_HEAD(clean_pages);
+
+	list_for_each_entry_safe(page, next, page_list, lru) {
+		if (page_is_file_cache(page) && !PageDirty(page)) {
+			ClearPageActive(page);
+			list_move(&page->lru, &clean_pages);
+		}
+	}
+
+	ret = shrink_page_list(&clean_pages, zone, &sc,
+				TTU_UNMAP|TTU_IGNORE_ACCESS,
+				&dummy1, &dummy2, true);
+	list_splice(&clean_pages, page_list);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	return ret;
+}
+
 /*
  * Attempt to remove the specified page from its LRU. Only take this page
  * if it is of the appropriate PageActive status. Pages which are being
@@ -1278,8 +1309,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (nr_taken == 0)
 		return 0;
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
-						&nr_dirty, &nr_writeback);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
+					&nr_dirty, &nr_writeback, false);
 
 	spin_lock_irq(&zone->lru_lock);
 