
Commit 1dd214b

x-y-z authored and torvalds committed
mm: page_alloc: avoid merging non-fallbackable pageblocks with others
This is done in addition to MIGRATE_ISOLATE pageblock merge avoidance. It prepares for the upcoming removal of the MAX_ORDER-1 alignment requirement for CMA and alloc_contig_range().

MIGRATE_HIGHATOMIC should not merge with other migratetypes like MIGRATE_ISOLATE and MIGRATE_CMA [1], so this commit prevents that too.

Remove MIGRATE_CMA and MIGRATE_ISOLATE from the fallbacks list, since they are never used.

[1] https://lore.kernel.org/linux-mm/[email protected]/

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Zi Yan <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Mike Rapoport <[email protected]>
Reviewed-by: Oscar Salvador <[email protected]>
Cc: Mike Rapoport <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent ff11a7c commit 1dd214b
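
For orientation (not part of the patch): the new migratetype_is_mergeable() helper below relies on the ordering of the migratetype enum in include/linux/mmzone.h, where the three types that have fallbacks[] entries sit below MIGRATE_PCPTYPES, while MIGRATE_HIGHATOMIC, MIGRATE_CMA and MIGRATE_ISOLATE sit at or above it. A minimal userspace sketch of that relationship, assuming the enum layout of kernels from this era with CONFIG_CMA and CONFIG_MEMORY_ISOLATION enabled (the enum here is a simplified copy, not kernel code):

/* Userspace illustration only; mirrors the kernel's migratetype ordering. */
#include <stdbool.h>
#include <stdio.h>

enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_PCPTYPES,                       /* number of types on the pcp lists */
        MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
        MIGRATE_CMA,                            /* only with CONFIG_CMA=y */
        MIGRATE_ISOLATE,                        /* only with CONFIG_MEMORY_ISOLATION=y */
        MIGRATE_TYPES
};

/* Same test as the helper added by this patch: true only for types that
 * can fall back to other migratetypes (i.e., have a fallbacks[] entry). */
static bool migratetype_is_mergeable(int mt)
{
        return mt < MIGRATE_PCPTYPES;
}

int main(void)
{
        /* The three pcp types are mergeable; HIGHATOMIC, CMA and ISOLATE are not. */
        printf("UNMOVABLE   %d\n", migratetype_is_mergeable(MIGRATE_UNMOVABLE));   /* 1 */
        printf("MOVABLE     %d\n", migratetype_is_mergeable(MIGRATE_MOVABLE));     /* 1 */
        printf("RECLAIMABLE %d\n", migratetype_is_mergeable(MIGRATE_RECLAIMABLE)); /* 1 */
        printf("HIGHATOMIC  %d\n", migratetype_is_mergeable(MIGRATE_HIGHATOMIC));  /* 0 */
        printf("CMA         %d\n", migratetype_is_mergeable(MIGRATE_CMA));         /* 0 */
        printf("ISOLATE     %d\n", migratetype_is_mergeable(MIGRATE_ISOLATE));     /* 0 */
        return 0;
}

With CONFIG_CMA or CONFIG_MEMORY_ISOLATION disabled the tail of the enum shrinks, but the three fallback-capable types still sit below MIGRATE_PCPTYPES, so the helper behaves the same way.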

2 files changed, +32 -23 lines changed

include/linux/mmzone.h

Lines changed: 11 additions & 0 deletions
@@ -83,6 +83,17 @@ static inline bool is_migrate_movable(int mt)
 	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
 }
 
+/*
+ * Check whether a migratetype can be merged with another migratetype.
+ *
+ * It is only mergeable when it can fall back to other migratetypes for
+ * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
+ */
+static inline bool migratetype_is_mergeable(int mt)
+{
+	return mt < MIGRATE_PCPTYPES;
+}
+
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
 		for (type = 0; type < MIGRATE_TYPES; type++)

mm/page_alloc.c

Lines changed: 21 additions & 23 deletions
@@ -1117,25 +1117,24 @@ static inline void __free_one_page(struct page *page,
 	}
 	if (order < MAX_ORDER - 1) {
 		/* If we are here, it means order is >= pageblock_order.
-		 * We want to prevent merge between freepages on isolate
-		 * pageblock and normal pageblock. Without this, pageblock
-		 * isolation could cause incorrect freepage or CMA accounting.
+		 * We want to prevent merge between freepages on pageblock
+		 * without fallbacks and normal pageblock. Without this,
+		 * pageblock isolation could cause incorrect freepage or CMA
+		 * accounting or HIGHATOMIC accounting.
 		 *
 		 * We don't want to hit this code for the more frequent
 		 * low-order merging.
 		 */
-		if (unlikely(has_isolate_pageblock(zone))) {
-			int buddy_mt;
+		int buddy_mt;
 
-			buddy_pfn = __find_buddy_pfn(pfn, order);
-			buddy = page + (buddy_pfn - pfn);
-			buddy_mt = get_pageblock_migratetype(buddy);
+		buddy_pfn = __find_buddy_pfn(pfn, order);
+		buddy = page + (buddy_pfn - pfn);
+		buddy_mt = get_pageblock_migratetype(buddy);
 
-			if (migratetype != buddy_mt
-					&& (is_migrate_isolate(migratetype) ||
-						is_migrate_isolate(buddy_mt)))
-				goto done_merging;
-		}
+		if (migratetype != buddy_mt
+				&& (!migratetype_is_mergeable(migratetype) ||
+					!migratetype_is_mergeable(buddy_mt)))
+			goto done_merging;
 		max_order = order + 1;
 		goto continue_merging;
 	}
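
Reading the hunk above: for frees of order >= pageblock_order, merging with the buddy is now abandoned whenever the two pageblocks have different migratetypes and either one cannot fall back, instead of only when one of them is MIGRATE_ISOLATE (and the zone has an isolated pageblock). As a paraphrase, reusing the mock enum and helper from the sketch near the top of this page (illustration only, not additional kernel code):

/* Condition that sends __free_one_page() to done_merging for
 * order >= pageblock_order after this patch (paraphrase). */
static bool stop_high_order_merge(int migratetype, int buddy_mt)
{
        return migratetype != buddy_mt &&
               (!migratetype_is_mergeable(migratetype) ||
                !migratetype_is_mergeable(buddy_mt));
}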
@@ -2479,17 +2478,13 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
+ *
+ * The other migratetypes do not have fallbacks.
  */
 static int fallbacks[MIGRATE_TYPES][3] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
-#ifdef CONFIG_CMA
-	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
-#endif
-#ifdef CONFIG_MEMORY_ISOLATION
-	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
-#endif
 };
 
 #ifdef CONFIG_CMA
@@ -2795,8 +2790,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
 
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
-	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
-	    && !is_migrate_cma(mt)) {
+	/* Only reserve normal pageblocks (i.e., they can merge with others) */
+	if (migratetype_is_mergeable(mt)) {
 		zone->nr_reserved_highatomic += pageblock_nr_pages;
 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
@@ -3545,8 +3540,11 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	struct page *endpage = page + (1 << order) - 1;
 	for (; page < endpage; page += pageblock_nr_pages) {
 		int mt = get_pageblock_migratetype(page);
-		if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
-		    && !is_migrate_highatomic(mt))
+		/*
+		 * Only change normal pageblocks (i.e., they can merge
+		 * with others)
+		 */
+		if (migratetype_is_mergeable(mt))
 			set_pageblock_migratetype(page,
 						  MIGRATE_MOVABLE);
 	}
