Skip to content

Commit b48d8a8

Browse files
x-y-zakpm00
authored and committed
mm: page_isolation: move has_unmovable_pages() to mm/page_isolation.c
Patch series "Use pageblock_order for cma and alloc_contig_range alignment", v11. This patchset tries to remove the MAX_ORDER-1 alignment requirement for CMA and alloc_contig_range(). It prepares for my upcoming changes to make MAX_ORDER adjustable at boot time[1]. The MAX_ORDER - 1 alignment requirement comes from that alloc_contig_range() isolates pageblocks to remove free memory from buddy allocator but isolating only a subset of pageblocks within a page spanning across multiple pageblocks causes free page accounting issues. Isolated page might not be put into the right free list, since the code assumes the migratetype of the first pageblock as the whole free page migratetype. This is based on the discussion at [2]. To remove the requirement, this patchset: 1. isolates pages at pageblock granularity instead of max(MAX_ORDER_NR_PAEGS, pageblock_nr_pages); 2. splits free pages across the specified range or migrates in-use pages across the specified range then splits the freed page to avoid free page accounting issues (it happens when multiple pageblocks within a single page have different migratetypes); 3. only checks unmovable pages within the range instead of MAX_ORDER - 1 aligned range during isolation to avoid alloc_contig_range() failure when pageblocks within a MAX_ORDER - 1 aligned range are allocated separately. 4. returns pages not in the range as it did before. One optimization might come later: 1. make MIGRATE_ISOLATE a separate bit to be able to restore the original migratetypes when isolation fails in the middle of the range. [1] https://lore.kernel.org/linux-mm/[email protected]/ [2] https://lore.kernel.org/linux-mm/[email protected]/ This patch (of 6): has_unmovable_pages() is only used in mm/page_isolation.c. Move it from mm/page_alloc.c and make it static. 
Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Zi Yan <[email protected]> Reviewed-by: Oscar Salvador <[email protected]> Reviewed-by: Mike Rapoport <[email protected]> Acked-by: David Hildenbrand <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Eric Ren <[email protected]> Cc: Christophe Leroy <[email protected]> Cc: Minchan Kim <[email protected]> Cc: kernel test robot <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent c1a31a2 commit b48d8a8

File tree

3 files changed

+119
-121
lines changed

3 files changed

+119
-121
lines changed

include/linux/page-isolation.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,6 @@ static inline bool is_migrate_isolate(int migratetype)
3333
#define MEMORY_OFFLINE 0x1
3434
#define REPORT_FAILURE 0x2
3535

36-
struct page *has_unmovable_pages(struct zone *zone, struct page *page,
37-
int migratetype, int flags);
3836
void set_pageblock_migratetype(struct page *page, int migratetype);
3937
int move_freepages_block(struct zone *zone, struct page *page,
4038
int migratetype, int *num_movable);

mm/page_alloc.c

Lines changed: 0 additions & 119 deletions
Original file line numberDiff line numberDiff line change
@@ -8918,125 +8918,6 @@ void *__init alloc_large_system_hash(const char *tablename,
89188918
return table;
89198919
}
89208920

8921-
/*
8922-
* This function checks whether pageblock includes unmovable pages or not.
8923-
*
8924-
* PageLRU check without isolation or lru_lock could race so that
8925-
* MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
8926-
* check without lock_page also may miss some movable non-lru pages at
8927-
* race condition. So you can't expect this function should be exact.
8928-
*
8929-
* Returns a page without holding a reference. If the caller wants to
8930-
* dereference that page (e.g., dumping), it has to make sure that it
8931-
* cannot get removed (e.g., via memory unplug) concurrently.
8932-
*
8933-
*/
8934-
struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8935-
int migratetype, int flags)
8936-
{
8937-
unsigned long iter = 0;
8938-
unsigned long pfn = page_to_pfn(page);
8939-
unsigned long offset = pfn % pageblock_nr_pages;
8940-
8941-
if (is_migrate_cma_page(page)) {
8942-
/*
8943-
* CMA allocations (alloc_contig_range) really need to mark
8944-
* isolate CMA pageblocks even when they are not movable in fact
8945-
* so consider them movable here.
8946-
*/
8947-
if (is_migrate_cma(migratetype))
8948-
return NULL;
8949-
8950-
return page;
8951-
}
8952-
8953-
for (; iter < pageblock_nr_pages - offset; iter++) {
8954-
page = pfn_to_page(pfn + iter);
8955-
8956-
/*
8957-
* Both, bootmem allocations and memory holes are marked
8958-
* PG_reserved and are unmovable. We can even have unmovable
8959-
* allocations inside ZONE_MOVABLE, for example when
8960-
* specifying "movablecore".
8961-
*/
8962-
if (PageReserved(page))
8963-
return page;
8964-
8965-
/*
8966-
* If the zone is movable and we have ruled out all reserved
8967-
* pages then it should be reasonably safe to assume the rest
8968-
* is movable.
8969-
*/
8970-
if (zone_idx(zone) == ZONE_MOVABLE)
8971-
continue;
8972-
8973-
/*
8974-
* Hugepages are not in LRU lists, but they're movable.
8975-
* THPs are on the LRU, but need to be counted as #small pages.
8976-
* We need not scan over tail pages because we don't
8977-
* handle each tail page individually in migration.
8978-
*/
8979-
if (PageHuge(page) || PageTransCompound(page)) {
8980-
struct page *head = compound_head(page);
8981-
unsigned int skip_pages;
8982-
8983-
if (PageHuge(page)) {
8984-
if (!hugepage_migration_supported(page_hstate(head)))
8985-
return page;
8986-
} else if (!PageLRU(head) && !__PageMovable(head)) {
8987-
return page;
8988-
}
8989-
8990-
skip_pages = compound_nr(head) - (page - head);
8991-
iter += skip_pages - 1;
8992-
continue;
8993-
}
8994-
8995-
/*
8996-
* We can't use page_count without pin a page
8997-
* because another CPU can free compound page.
8998-
* This check already skips compound tails of THP
8999-
* because their page->_refcount is zero at all time.
9000-
*/
9001-
if (!page_ref_count(page)) {
9002-
if (PageBuddy(page))
9003-
iter += (1 << buddy_order(page)) - 1;
9004-
continue;
9005-
}
9006-
9007-
/*
9008-
* The HWPoisoned page may be not in buddy system, and
9009-
* page_count() is not 0.
9010-
*/
9011-
if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
9012-
continue;
9013-
9014-
/*
9015-
* We treat all PageOffline() pages as movable when offlining
9016-
* to give drivers a chance to decrement their reference count
9017-
* in MEM_GOING_OFFLINE in order to indicate that these pages
9018-
* can be offlined as there are no direct references anymore.
9019-
* For actually unmovable PageOffline() where the driver does
9020-
* not support this, we will fail later when trying to actually
9021-
* move these pages that still have a reference count > 0.
9022-
* (false negatives in this function only)
9023-
*/
9024-
if ((flags & MEMORY_OFFLINE) && PageOffline(page))
9025-
continue;
9026-
9027-
if (__PageMovable(page) || PageLRU(page))
9028-
continue;
9029-
9030-
/*
9031-
* If there are RECLAIMABLE pages, we need to check
9032-
* it. But now, memory offline itself doesn't call
9033-
* shrink_node_slabs() and it still to be fixed.
9034-
*/
9035-
return page;
9036-
}
9037-
return NULL;
9038-
}
9039-
90408921
#ifdef CONFIG_CONTIG_ALLOC
90418922
static unsigned long pfn_max_align_down(unsigned long pfn)
90428923
{

mm/page_isolation.c

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,125 @@
1515
#define CREATE_TRACE_POINTS
1616
#include <trace/events/page_isolation.h>
1717

18+
/*
19+
* This function checks whether pageblock includes unmovable pages or not.
20+
*
21+
* PageLRU check without isolation or lru_lock could race so that
22+
* MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
23+
* check without lock_page also may miss some movable non-lru pages at
24+
* race condition. So you can't expect this function should be exact.
25+
*
26+
* Returns a page without holding a reference. If the caller wants to
27+
* dereference that page (e.g., dumping), it has to make sure that it
28+
* cannot get removed (e.g., via memory unplug) concurrently.
29+
*
30+
*/
31+
static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
32+
int migratetype, int flags)
33+
{
34+
unsigned long iter = 0;
35+
unsigned long pfn = page_to_pfn(page);
36+
unsigned long offset = pfn % pageblock_nr_pages;
37+
38+
if (is_migrate_cma_page(page)) {
39+
/*
40+
* CMA allocations (alloc_contig_range) really need to mark
41+
* isolate CMA pageblocks even when they are not movable in fact
42+
* so consider them movable here.
43+
*/
44+
if (is_migrate_cma(migratetype))
45+
return NULL;
46+
47+
return page;
48+
}
49+
50+
for (; iter < pageblock_nr_pages - offset; iter++) {
51+
page = pfn_to_page(pfn + iter);
52+
53+
/*
54+
* Both, bootmem allocations and memory holes are marked
55+
* PG_reserved and are unmovable. We can even have unmovable
56+
* allocations inside ZONE_MOVABLE, for example when
57+
* specifying "movablecore".
58+
*/
59+
if (PageReserved(page))
60+
return page;
61+
62+
/*
63+
* If the zone is movable and we have ruled out all reserved
64+
* pages then it should be reasonably safe to assume the rest
65+
* is movable.
66+
*/
67+
if (zone_idx(zone) == ZONE_MOVABLE)
68+
continue;
69+
70+
/*
71+
* Hugepages are not in LRU lists, but they're movable.
72+
* THPs are on the LRU, but need to be counted as #small pages.
73+
* We need not scan over tail pages because we don't
74+
* handle each tail page individually in migration.
75+
*/
76+
if (PageHuge(page) || PageTransCompound(page)) {
77+
struct page *head = compound_head(page);
78+
unsigned int skip_pages;
79+
80+
if (PageHuge(page)) {
81+
if (!hugepage_migration_supported(page_hstate(head)))
82+
return page;
83+
} else if (!PageLRU(head) && !__PageMovable(head)) {
84+
return page;
85+
}
86+
87+
skip_pages = compound_nr(head) - (page - head);
88+
iter += skip_pages - 1;
89+
continue;
90+
}
91+
92+
/*
93+
* We can't use page_count without pin a page
94+
* because another CPU can free compound page.
95+
* This check already skips compound tails of THP
96+
* because their page->_refcount is zero at all time.
97+
*/
98+
if (!page_ref_count(page)) {
99+
if (PageBuddy(page))
100+
iter += (1 << buddy_order(page)) - 1;
101+
continue;
102+
}
103+
104+
/*
105+
* The HWPoisoned page may be not in buddy system, and
106+
* page_count() is not 0.
107+
*/
108+
if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
109+
continue;
110+
111+
/*
112+
* We treat all PageOffline() pages as movable when offlining
113+
* to give drivers a chance to decrement their reference count
114+
* in MEM_GOING_OFFLINE in order to indicate that these pages
115+
* can be offlined as there are no direct references anymore.
116+
* For actually unmovable PageOffline() where the driver does
117+
* not support this, we will fail later when trying to actually
118+
* move these pages that still have a reference count > 0.
119+
* (false negatives in this function only)
120+
*/
121+
if ((flags & MEMORY_OFFLINE) && PageOffline(page))
122+
continue;
123+
124+
if (__PageMovable(page) || PageLRU(page))
125+
continue;
126+
127+
/*
128+
* If there are RECLAIMABLE pages, we need to check
129+
* it. But now, memory offline itself doesn't call
130+
* shrink_node_slabs() and it still to be fixed.
131+
*/
132+
return page;
133+
}
134+
return NULL;
135+
}
136+
18137
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
19138
{
20139
struct zone *zone = page_zone(page);

0 commit comments

Comments
 (0)