
Commit 97500a4

djbw authored and torvalds committed
mm: maintain randomization of page free lists
When freeing a page with an order >= shuffle_page_order randomly select the front or back of the list for insertion.

While the mm tries to defragment physical pages into huge pages this can tend to make the page allocator more predictable over time. Inject the front-back randomness to preserve the initial randomness established by shuffle_free_memory() when the kernel was booted.

The overhead of this manipulation is constrained by only being applied for MAX_ORDER sized pages by default.

[[email protected]: coding-style fixes]
Link: http://lkml.kernel.org/r/154899812788.3165233.9066631950746578517.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Keith Busch <[email protected]>
Cc: Robert Elliott <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent b03641a commit 97500a4
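
The mechanism is simple enough to show in isolation. Below is a minimal user-space C sketch of the idea (not the kernel code): a toy doubly-linked free list where a free at or above a shuffle threshold inserts at the head or the tail based on a random bit, while lower orders keep the old head-insertion path. The fake_page / fake_free_area types, the helper names, the SHUFFLE_ORDER value, and the use of rand() in place of get_random_u64() are all illustrative assumptions.

/*
 * Illustrative user-space sketch only -- not the kernel implementation.
 * Frees at or above SHUFFLE_ORDER flip a coin between the head and the
 * tail of the free list; everything else keeps head insertion.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SHUFFLE_ORDER 10        /* illustrative threshold, stands in for the kernel's SHUFFLE_ORDER */

struct fake_page { struct fake_page *prev, *next; int id; };

struct fake_free_area {
        struct fake_page head;  /* list head sentinel */
        unsigned long nr_free;
};

static void area_init(struct fake_free_area *area)
{
        area->head.prev = area->head.next = &area->head;
        area->nr_free = 0;
}

static void add_head(struct fake_page *p, struct fake_free_area *area)
{
        p->next = area->head.next;
        p->prev = &area->head;
        area->head.next->prev = p;
        area->head.next = p;
        area->nr_free++;
}

static void add_tail(struct fake_page *p, struct fake_free_area *area)
{
        p->prev = area->head.prev;
        p->next = &area->head;
        area->head.prev->next = p;
        area->head.prev = p;
        area->nr_free++;
}

static bool is_shuffle_order(int order)
{
        return order >= SHUFFLE_ORDER;
}

/* Mirrors the decision this commit adds to __free_one_page(). */
static void free_to_area(struct fake_page *p, struct fake_free_area *area, int order)
{
        if (is_shuffle_order(order)) {
                if (rand() & 1)         /* rand() stands in for get_random_u64() */
                        add_head(p, area);
                else
                        add_tail(p, area);
        } else {
                add_head(p, area);
        }
}

int main(void)
{
        struct fake_free_area area;
        struct fake_page pages[4];

        area_init(&area);
        for (int i = 0; i < 4; i++) {
                pages[i].id = i;
                free_to_area(&pages[i], &area, SHUFFLE_ORDER);
        }
        for (struct fake_page *p = area.head.next; p != &area.head; p = p->next)
                printf("page %d\n", p->id);
        return 0;
}

Over many frees this keeps the order in which pages come back off the list unpredictable, preserving the randomness that shuffle_free_memory() established at boot.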

File tree

4 files changed: +56 additions, −2 deletions

include/linux/mmzone.h

Lines changed: 12 additions & 0 deletions
@@ -116,6 +116,18 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *ar
         area->nr_free++;
 }
 
+#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
+/* Used to preserve page allocation order entropy */
+void add_to_free_area_random(struct page *page, struct free_area *area,
+                int migratetype);
+#else
+static inline void add_to_free_area_random(struct page *page,
+                struct free_area *area, int migratetype)
+{
+        add_to_free_area(page, area, migratetype);
+}
+#endif
+
 /* Used for pages which are on another list */
 static inline void move_to_free_area(struct page *page, struct free_area *area,
                                 int migratetype)

mm/page_alloc.c

Lines changed: 9 additions & 2 deletions
@@ -43,6 +43,7 @@
 #include <linux/mempolicy.h>
 #include <linux/memremap.h>
 #include <linux/stop_machine.h>
+#include <linux/random.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
@@ -958,7 +959,8 @@ static inline void __free_one_page(struct page *page,
          * so it's less likely to be used soon and more likely to be merged
          * as a higher order page
          */
-        if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
+        if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
+                        && !is_shuffle_order(order)) {
                 struct page *higher_page, *higher_buddy;
                 combined_pfn = buddy_pfn & pfn;
                 higher_page = page + (combined_pfn - pfn);
@@ -972,7 +974,12 @@ static inline void __free_one_page(struct page *page,
                 }
         }
 
-        add_to_free_area(page, &zone->free_area[order], migratetype);
+        if (is_shuffle_order(order))
+                add_to_free_area_random(page, &zone->free_area[order],
+                                migratetype);
+        else
+                add_to_free_area(page, &zone->free_area[order], migratetype);
+
 }
 
 /*

mm/shuffle.c

Lines changed: 23 additions & 0 deletions
@@ -182,3 +182,26 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat)
         for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                 shuffle_zone(z);
 }
+
+void add_to_free_area_random(struct page *page, struct free_area *area,
+                int migratetype)
+{
+        static u64 rand;
+        static u8 rand_bits;
+
+        /*
+         * The lack of locking is deliberate. If 2 threads race to
+         * update the rand state it just adds to the entropy.
+         */
+        if (rand_bits == 0) {
+                rand_bits = 64;
+                rand = get_random_u64();
+        }
+
+        if (rand & 1)
+                add_to_free_area(page, area, migratetype);
+        else
+                add_to_free_area_tail(page, area, migratetype);
+        rand_bits--;
+        rand >>= 1;
+}
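
One detail worth noting in add_to_free_area_random() is how cheaply the randomness is obtained: a single get_random_u64() is drawn and then consumed one bit per call, so the generator is only touched once every 64 insertions. Below is a small user-space sketch of the same batching pattern, with getrandom(2) standing in for get_random_u64() and printf standing in for the list insertion; the names and output are illustrative, not part of the kernel change.

/*
 * User-space sketch of the entropy batching in add_to_free_area_random():
 * draw 64 random bits once, then spend one bit per head-or-tail decision.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/random.h>

static int coin_flip(void)
{
        static uint64_t rand_word;
        static uint8_t rand_bits;
        int bit;

        if (rand_bits == 0) {
                /* Refill: one system call buys the next 64 decisions. */
                if (getrandom(&rand_word, sizeof(rand_word), 0) !=
                    (ssize_t)sizeof(rand_word))
                        exit(1);
                rand_bits = 64;
        }

        bit = (int)(rand_word & 1);
        rand_word >>= 1;
        rand_bits--;
        return bit;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("free #%d -> %s of free list\n", i,
                       coin_flip() ? "head" : "tail");
        return 0;
}

As the comment in the kernel function notes, the static state is intentionally unlocked; two CPUs racing on the refill or the shift only perturb the bits further, which is acceptable when all that is needed is a statistically random choice.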

mm/shuffle.h

Lines changed: 12 additions & 0 deletions
@@ -36,6 +36,13 @@ static inline void shuffle_zone(struct zone *z)
                 return;
         __shuffle_zone(z);
 }
+
+static inline bool is_shuffle_order(int order)
+{
+        if (!static_branch_unlikely(&page_alloc_shuffle_key))
+                return false;
+        return order >= SHUFFLE_ORDER;
+}
 #else
 static inline void shuffle_free_memory(pg_data_t *pgdat)
 {
@@ -48,5 +55,10 @@ static inline void shuffle_zone(struct zone *z)
 static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 {
 }
+
+static inline bool is_shuffle_order(int order)
+{
+        return false;
+}
 #endif
 #endif /* _MM_SHUFFLE_H */
