Skip to content

Commit b03641a

Browse files
djbw
authored and torvalds committed
mm: move buddy list manipulations into helpers
In preparation for runtime randomization of the zone lists, take all (well, most of) the list_*() functions in the buddy allocator and put them in helper functions. Provide a common control point for injecting additional behavior when freeing pages. [[email protected]: fix buddy list helpers] Link: http://lkml.kernel.org/r/155033679702.1773410.13041474192173212653.stgit@dwillia2-desk3.amr.corp.intel.com [[email protected]: remove del_page_from_free_area() migratetype parameter] Link: http://lkml.kernel.org/r/[email protected] Link: http://lkml.kernel.org/r/154899812264.3165233.5219320056406926223.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]> Tested-by: Tetsuo Handa <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Kees Cook <[email protected]> Cc: Keith Busch <[email protected]> Cc: Robert Elliott <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent e900a91 commit b03641a

File tree

5 files changed

+73
-48
lines changed

5 files changed

+73
-48
lines changed

include/linux/mm.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -536,9 +536,6 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
536536
struct mmu_gather;
537537
struct inode;
538538

539-
#define page_private(page) ((page)->private)
540-
#define set_page_private(page, v) ((page)->private = (v))
541-
542539
#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
543540
static inline int pmd_devmap(pmd_t pmd)
544541
{

include/linux/mm_types.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -220,6 +220,9 @@ struct page {
220220
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
221221
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
222222

223+
#define page_private(page) ((page)->private)
224+
#define set_page_private(page, v) ((page)->private = (v))
225+
223226
struct page_frag_cache {
224227
void * va;
225228
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)

include/linux/mmzone.h

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
#include <linux/pageblock-flags.h>
1919
#include <linux/page-flags-layout.h>
2020
#include <linux/atomic.h>
21+
#include <linux/mm_types.h>
22+
#include <linux/page-flags.h>
2123
#include <asm/page.h>
2224

2325
/* Free memory management - zoned buddy allocator. */
@@ -98,6 +100,50 @@ struct free_area {
98100
unsigned long nr_free;
99101
};
100102

103+
/* Used for pages not on another list */
104+
static inline void add_to_free_area(struct page *page, struct free_area *area,
105+
int migratetype)
106+
{
107+
list_add(&page->lru, &area->free_list[migratetype]);
108+
area->nr_free++;
109+
}
110+
111+
/* Used for pages not on another list */
112+
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
113+
int migratetype)
114+
{
115+
list_add_tail(&page->lru, &area->free_list[migratetype]);
116+
area->nr_free++;
117+
}
118+
119+
/* Used for pages which are on another list */
120+
static inline void move_to_free_area(struct page *page, struct free_area *area,
121+
int migratetype)
122+
{
123+
list_move(&page->lru, &area->free_list[migratetype]);
124+
}
125+
126+
static inline struct page *get_page_from_free_area(struct free_area *area,
127+
int migratetype)
128+
{
129+
return list_first_entry_or_null(&area->free_list[migratetype],
130+
struct page, lru);
131+
}
132+
133+
static inline void del_page_from_free_area(struct page *page,
134+
struct free_area *area)
135+
{
136+
list_del(&page->lru);
137+
__ClearPageBuddy(page);
138+
set_page_private(page, 0);
139+
area->nr_free--;
140+
}
141+
142+
static inline bool free_area_empty(struct free_area *area, int migratetype)
143+
{
144+
return list_empty(&area->free_list[migratetype]);
145+
}
146+
101147
struct pglist_data;
102148

103149
/*

mm/compaction.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1888,13 +1888,13 @@ static enum compact_result __compact_finished(struct compact_control *cc)
18881888
bool can_steal;
18891889

18901890
/* Job done if page is free of the right migratetype */
1891-
if (!list_empty(&area->free_list[migratetype]))
1891+
if (!free_area_empty(area, migratetype))
18921892
return COMPACT_SUCCESS;
18931893

18941894
#ifdef CONFIG_CMA
18951895
/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
18961896
if (migratetype == MIGRATE_MOVABLE &&
1897-
!list_empty(&area->free_list[MIGRATE_CMA]))
1897+
!free_area_empty(area, MIGRATE_CMA))
18981898
return COMPACT_SUCCESS;
18991899
#endif
19001900
/*

mm/page_alloc.c

Lines changed: 22 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -756,12 +756,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
756756
__SetPageBuddy(page);
757757
}
758758

759-
static inline void rmv_page_order(struct page *page)
760-
{
761-
__ClearPageBuddy(page);
762-
set_page_private(page, 0);
763-
}
764-
765759
/*
766760
* This function checks whether a page is free && is the buddy
767761
* we can coalesce a page and its buddy if
@@ -919,13 +913,10 @@ static inline void __free_one_page(struct page *page,
919913
* Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
920914
* merge with it and move up one order.
921915
*/
922-
if (page_is_guard(buddy)) {
916+
if (page_is_guard(buddy))
923917
clear_page_guard(zone, buddy, order, migratetype);
924-
} else {
925-
list_del(&buddy->lru);
926-
zone->free_area[order].nr_free--;
927-
rmv_page_order(buddy);
928-
}
918+
else
919+
del_page_from_free_area(buddy, &zone->free_area[order]);
929920
combined_pfn = buddy_pfn & pfn;
930921
page = page + (combined_pfn - pfn);
931922
pfn = combined_pfn;
@@ -975,15 +966,13 @@ static inline void __free_one_page(struct page *page,
975966
higher_buddy = higher_page + (buddy_pfn - combined_pfn);
976967
if (pfn_valid_within(buddy_pfn) &&
977968
page_is_buddy(higher_page, higher_buddy, order + 1)) {
978-
list_add_tail(&page->lru,
979-
&zone->free_area[order].free_list[migratetype]);
980-
goto out;
969+
add_to_free_area_tail(page, &zone->free_area[order],
970+
migratetype);
971+
return;
981972
}
982973
}
983974

984-
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
985-
out:
986-
zone->free_area[order].nr_free++;
975+
add_to_free_area(page, &zone->free_area[order], migratetype);
987976
}
988977

989978
/*
@@ -1974,8 +1963,7 @@ static inline void expand(struct zone *zone, struct page *page,
19741963
if (set_page_guard(zone, &page[size], high, migratetype))
19751964
continue;
19761965

1977-
list_add(&page[size].lru, &area->free_list[migratetype]);
1978-
area->nr_free++;
1966+
add_to_free_area(&page[size], area, migratetype);
19791967
set_page_order(&page[size], high);
19801968
}
19811969
}
@@ -2117,13 +2105,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
21172105
/* Find a page of the appropriate size in the preferred list */
21182106
for (current_order = order; current_order < MAX_ORDER; ++current_order) {
21192107
area = &(zone->free_area[current_order]);
2120-
page = list_first_entry_or_null(&area->free_list[migratetype],
2121-
struct page, lru);
2108+
page = get_page_from_free_area(area, migratetype);
21222109
if (!page)
21232110
continue;
2124-
list_del(&page->lru);
2125-
rmv_page_order(page);
2126-
area->nr_free--;
2111+
del_page_from_free_area(page, area);
21272112
expand(zone, page, order, current_order, area, migratetype);
21282113
set_pcppage_migratetype(page, migratetype);
21292114
return page;
@@ -2209,8 +2194,7 @@ static int move_freepages(struct zone *zone,
22092194
}
22102195

22112196
order = page_order(page);
2212-
list_move(&page->lru,
2213-
&zone->free_area[order].free_list[migratetype]);
2197+
move_to_free_area(page, &zone->free_area[order], migratetype);
22142198
page += 1 << order;
22152199
pages_moved += 1 << order;
22162200
}
@@ -2398,7 +2382,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
23982382

23992383
single_page:
24002384
area = &zone->free_area[current_order];
2401-
list_move(&page->lru, &area->free_list[start_type]);
2385+
move_to_free_area(page, area, start_type);
24022386
}
24032387

24042388
/*
@@ -2422,7 +2406,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
24222406
if (fallback_mt == MIGRATE_TYPES)
24232407
break;
24242408

2425-
if (list_empty(&area->free_list[fallback_mt]))
2409+
if (free_area_empty(area, fallback_mt))
24262410
continue;
24272411

24282412
if (can_steal_fallback(order, migratetype))
@@ -2509,9 +2493,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
25092493
for (order = 0; order < MAX_ORDER; order++) {
25102494
struct free_area *area = &(zone->free_area[order]);
25112495

2512-
page = list_first_entry_or_null(
2513-
&area->free_list[MIGRATE_HIGHATOMIC],
2514-
struct page, lru);
2496+
page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
25152497
if (!page)
25162498
continue;
25172499

@@ -2634,8 +2616,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
26342616
VM_BUG_ON(current_order == MAX_ORDER);
26352617

26362618
do_steal:
2637-
page = list_first_entry(&area->free_list[fallback_mt],
2638-
struct page, lru);
2619+
page = get_page_from_free_area(area, fallback_mt);
26392620

26402621
steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
26412622
can_steal);
@@ -3072,6 +3053,7 @@ EXPORT_SYMBOL_GPL(split_page);
30723053

30733054
int __isolate_free_page(struct page *page, unsigned int order)
30743055
{
3056+
struct free_area *area = &page_zone(page)->free_area[order];
30753057
unsigned long watermark;
30763058
struct zone *zone;
30773059
int mt;
@@ -3096,9 +3078,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
30963078
}
30973079

30983080
/* Remove page from free list */
3099-
list_del(&page->lru);
3100-
zone->free_area[order].nr_free--;
3101-
rmv_page_order(page);
3081+
3082+
del_page_from_free_area(page, area);
31023083

31033084
/*
31043085
* Set the pageblock if the isolated page is at least half of a
@@ -3395,13 +3376,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
33953376
continue;
33963377

33973378
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3398-
if (!list_empty(&area->free_list[mt]))
3379+
if (!free_area_empty(area, mt))
33993380
return true;
34003381
}
34013382

34023383
#ifdef CONFIG_CMA
34033384
if ((alloc_flags & ALLOC_CMA) &&
3404-
!list_empty(&area->free_list[MIGRATE_CMA])) {
3385+
!free_area_empty(area, MIGRATE_CMA)) {
34053386
return true;
34063387
}
34073388
#endif
@@ -5328,7 +5309,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
53285309

53295310
types[order] = 0;
53305311
for (type = 0; type < MIGRATE_TYPES; type++) {
5331-
if (!list_empty(&area->free_list[type]))
5312+
if (!free_area_empty(area, type))
53325313
types[order] |= 1 << type;
53335314
}
53345315
}
@@ -8501,9 +8482,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
85018482
pr_info("remove from free list %lx %d %lx\n",
85028483
pfn, 1 << order, end_pfn);
85038484
#endif
8504-
list_del(&page->lru);
8505-
rmv_page_order(page);
8506-
zone->free_area[order].nr_free--;
8485+
del_page_from_free_area(page, &zone->free_area[order]);
85078486
for (i = 0; i < (1 << order); i++)
85088487
SetPageReserved((page+i));
85098488
pfn += (1 << order);

0 commit comments

Comments
 (0)