Skip to content

Commit dab4ead

Browse files
tehcaster authored and torvalds committed
mm, page_owner: make init_pages_in_zone() faster
In init_pages_in_zone() we currently use the generic set_page_owner() function to initialize page_owner info for early allocated pages. This means we needlessly do lookup_page_ext() twice for each page, and more importantly save_stack(), which has to unwind the stack and find the corresponding stack depot handle. Because the stack is always the same for the initialization, unwind it once in init_pages_in_zone() and reuse the handle. Also avoid the repeated lookup_page_ext(). This can significantly reduce boot times with page_owner=on on large machines, especially for kernels built without frame pointer, where the stack unwinding is noticeably slower. [[email protected]: don't duplicate code of __set_page_owner(), per Michal Hocko] [[email protected]: coding-style fixes] [[email protected]: create statically allocated fake stack trace for early allocated pages, per Michal] Link: http://lkml.kernel.org/r/[email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Vlastimil Babka <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Yang Shi <[email protected]> Cc: Laura Abbott <[email protected]> Cc: Vinayak Menon <[email protected]> Cc: zhong jiang <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent b95046b commit dab4ead

File tree

1 file changed

+32
-20
lines changed

1 file changed

+32
-20
lines changed

mm/page_owner.c

Lines changed: 32 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ DEFINE_STATIC_KEY_FALSE(page_owner_inited);
3030

3131
static depot_stack_handle_t dummy_handle;
3232
static depot_stack_handle_t failure_handle;
33+
static depot_stack_handle_t early_handle;
3334

3435
static void init_early_allocated_pages(void);
3536

@@ -53,7 +54,7 @@ static bool need_page_owner(void)
5354
return true;
5455
}
5556

56-
static noinline void register_dummy_stack(void)
57+
static __always_inline depot_stack_handle_t create_dummy_stack(void)
5758
{
5859
unsigned long entries[4];
5960
struct stack_trace dummy;
@@ -64,21 +65,22 @@ static noinline void register_dummy_stack(void)
6465
dummy.skip = 0;
6566

6667
save_stack_trace(&dummy);
67-
dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
68+
return depot_save_stack(&dummy, GFP_KERNEL);
6869
}
6970

70-
static noinline void register_failure_stack(void)
71+
static noinline void register_dummy_stack(void)
7172
{
72-
unsigned long entries[4];
73-
struct stack_trace failure;
73+
dummy_handle = create_dummy_stack();
74+
}
7475

75-
failure.nr_entries = 0;
76-
failure.max_entries = ARRAY_SIZE(entries);
77-
failure.entries = &entries[0];
78-
failure.skip = 0;
76+
static noinline void register_failure_stack(void)
77+
{
78+
failure_handle = create_dummy_stack();
79+
}
7980

80-
save_stack_trace(&failure);
81-
failure_handle = depot_save_stack(&failure, GFP_KERNEL);
81+
static noinline void register_early_stack(void)
82+
{
83+
early_handle = create_dummy_stack();
8284
}
8385

8486
static void init_page_owner(void)
@@ -88,6 +90,7 @@ static void init_page_owner(void)
8890

8991
register_dummy_stack();
9092
register_failure_stack();
93+
register_early_stack();
9194
static_branch_enable(&page_owner_inited);
9295
init_early_allocated_pages();
9396
}
@@ -165,24 +168,33 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
165168
return handle;
166169
}
167170

168-
noinline void __set_page_owner(struct page *page, unsigned int order,
169-
gfp_t gfp_mask)
171+
static inline void __set_page_owner_handle(struct page_ext *page_ext,
172+
depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
170173
{
171-
struct page_ext *page_ext = lookup_page_ext(page);
172174
struct page_owner *page_owner;
173175

174-
if (unlikely(!page_ext))
175-
return;
176-
177176
page_owner = get_page_owner(page_ext);
178-
page_owner->handle = save_stack(gfp_mask);
177+
page_owner->handle = handle;
179178
page_owner->order = order;
180179
page_owner->gfp_mask = gfp_mask;
181180
page_owner->last_migrate_reason = -1;
182181

183182
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
184183
}
185184

185+
noinline void __set_page_owner(struct page *page, unsigned int order,
186+
gfp_t gfp_mask)
187+
{
188+
struct page_ext *page_ext = lookup_page_ext(page);
189+
depot_stack_handle_t handle;
190+
191+
if (unlikely(!page_ext))
192+
return;
193+
194+
handle = save_stack(gfp_mask);
195+
__set_page_owner_handle(page_ext, handle, order, gfp_mask);
196+
}
197+
186198
void __set_page_owner_migrate_reason(struct page *page, int reason)
187199
{
188200
struct page_ext *page_ext = lookup_page_ext(page);
@@ -565,12 +577,12 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
565577
if (unlikely(!page_ext))
566578
continue;
567579

568-
/* Maybe overraping zone */
580+
/* Maybe overlapping zone */
569581
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
570582
continue;
571583

572584
/* Found early allocated page */
573-
set_page_owner(page, 0, 0);
585+
__set_page_owner_handle(page_ext, early_handle, 0, 0);
574586
count++;
575587
}
576588
}

0 commit comments

Comments
 (0)