Skip to content

Commit d46d025

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge various fixes from Andrew Morton: "10 fixes" * emailed patches from Andrew Morton <[email protected]>: mm, page_alloc: recalculate the preferred zoneref if the context can ignore memory policies mm, page_alloc: reset zonelist iterator after resetting fair zone allocation policy mm, oom_reaper: do not use siglock in try_oom_reaper() mm, page_alloc: prevent infinite loop in buffered_rmqueue() checkpatch: reduce git commit description style false positives mm/z3fold.c: avoid modifying HEADLESS page and minor cleanup memcg: add RCU locking around css_for_each_descendant_pre() in memcg_offline_kmem() mm: check the return value of lookup_page_ext for all call sites kdump: fix dmesg gdbmacro to work with record based printk mm: fix overflow in vm_map_ram()
2 parents 8c52b6d + e46e7b7 commit d46d025

File tree

11 files changed

+205
-50
lines changed

11 files changed

+205
-50
lines changed

Documentation/kdump/gdbmacros.txt

Lines changed: 82 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -170,21 +170,92 @@ document trapinfo
170170
address the kernel panicked.
171171
end
172172

173+
define dump_log_idx
174+
set $idx = $arg0
175+
if ($argc > 1)
176+
set $prev_flags = $arg1
177+
else
178+
set $prev_flags = 0
179+
end
180+
set $msg = ((struct printk_log *) (log_buf + $idx))
181+
set $prefix = 1
182+
set $newline = 1
183+
set $log = log_buf + $idx + sizeof(*$msg)
173184

174-
define dmesg
175-
set $i = 0
176-
set $end_idx = (log_end - 1) & (log_buf_len - 1)
185+
# prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
186+
if (($prev_flags & 8) && !($msg->flags & 4))
187+
set $prefix = 0
188+
end
189+
190+
# msg->flags & LOG_CONT
191+
if ($msg->flags & 8)
192+
# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
193+
if (($prev_flags & 8) && !($prev_flags & 2))
194+
set $prefix = 0
195+
end
196+
# (!(msg->flags & LOG_NEWLINE))
197+
if (!($msg->flags & 2))
198+
set $newline = 0
199+
end
200+
end
201+
202+
if ($prefix)
203+
printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
204+
end
205+
if ($msg->text_len != 0)
206+
eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
207+
end
208+
if ($newline)
209+
printf "\n"
210+
end
211+
if ($msg->dict_len > 0)
212+
set $dict = $log + $msg->text_len
213+
set $idx = 0
214+
set $line = 1
215+
while ($idx < $msg->dict_len)
216+
if ($line)
217+
printf " "
218+
set $line = 0
219+
end
220+
set $c = $dict[$idx]
221+
if ($c == '\0')
222+
printf "\n"
223+
set $line = 1
224+
else
225+
if ($c < ' ' || $c >= 127 || $c == '\\')
226+
printf "\\x%02x", $c
227+
else
228+
printf "%c", $c
229+
end
230+
end
231+
set $idx = $idx + 1
232+
end
233+
printf "\n"
234+
end
235+
end
236+
document dump_log_idx
237+
Dump a single log given its index in the log buffer. The first
238+
parameter is the index into log_buf, the second is optional and
239+
specifies the previous log buffer's flags, used for properly
240+
formatting continued lines.
241+
end
177242

178-
while ($i < logged_chars)
179-
set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
243+
define dmesg
244+
set $i = log_first_idx
245+
set $end_idx = log_first_idx
246+
set $prev_flags = 0
180247

181-
if ($idx + 100 <= $end_idx) || \
182-
($end_idx <= $idx && $idx + 100 < log_buf_len)
183-
printf "%.100s", &log_buf[$idx]
184-
set $i = $i + 100
248+
while (1)
249+
set $msg = ((struct printk_log *) (log_buf + $i))
250+
if ($msg->len == 0)
251+
set $i = 0
185252
else
186-
printf "%c", log_buf[$idx]
187-
set $i = $i + 1
253+
dump_log_idx $i $prev_flags
254+
set $i = $i + $msg->len
255+
set $prev_flags = $msg->flags
256+
end
257+
if ($i == $end_idx)
258+
loop_break
188259
end
189260
end
190261
end

include/linux/page_idle.h

Lines changed: 36 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
4646

4747
static inline bool page_is_young(struct page *page)
4848
{
49-
return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
49+
struct page_ext *page_ext = lookup_page_ext(page);
50+
51+
if (unlikely(!page_ext))
52+
return false;
53+
54+
return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
5055
}
5156

5257
static inline void set_page_young(struct page *page)
5358
{
54-
set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
59+
struct page_ext *page_ext = lookup_page_ext(page);
60+
61+
if (unlikely(!page_ext))
62+
return;
63+
64+
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
5565
}
5666

5767
static inline bool test_and_clear_page_young(struct page *page)
5868
{
59-
return test_and_clear_bit(PAGE_EXT_YOUNG,
60-
&lookup_page_ext(page)->flags);
69+
struct page_ext *page_ext = lookup_page_ext(page);
70+
71+
if (unlikely(!page_ext))
72+
return false;
73+
74+
return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
6175
}
6276

6377
static inline bool page_is_idle(struct page *page)
6478
{
65-
return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
79+
struct page_ext *page_ext = lookup_page_ext(page);
80+
81+
if (unlikely(!page_ext))
82+
return false;
83+
84+
return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
6685
}
6786

6887
static inline void set_page_idle(struct page *page)
6988
{
70-
set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
89+
struct page_ext *page_ext = lookup_page_ext(page);
90+
91+
if (unlikely(!page_ext))
92+
return;
93+
94+
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
7195
}
7296

7397
static inline void clear_page_idle(struct page *page)
7498
{
75-
clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
99+
struct page_ext *page_ext = lookup_page_ext(page);
100+
101+
if (unlikely(!page_ext))
102+
return;
103+
104+
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
76105
}
77106
#endif /* CONFIG_64BIT */
78107

mm/memcontrol.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2896,13 +2896,16 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
28962896
* ordering is imposed by list_lru_node->lock taken by
28972897
* memcg_drain_all_list_lrus().
28982898
*/
2899+
rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
28992900
css_for_each_descendant_pre(css, &memcg->css) {
29002901
child = mem_cgroup_from_css(css);
29012902
BUG_ON(child->kmemcg_id != kmemcg_id);
29022903
child->kmemcg_id = parent->kmemcg_id;
29032904
if (!memcg->use_hierarchy)
29042905
break;
29052906
}
2907+
rcu_read_unlock();
2908+
29062909
memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
29072910

29082911
memcg_free_cache_id(kmemcg_id);

mm/oom_kill.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk)
625625
if (atomic_read(&mm->mm_users) > 1) {
626626
rcu_read_lock();
627627
for_each_process(p) {
628-
bool exiting;
629-
630628
if (!process_shares_mm(p, mm))
631629
continue;
632630
if (fatal_signal_pending(p))
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk)
636634
* If the task is exiting make sure the whole thread group
637635
* is exiting and cannot access mm anymore.
638636
*/
639-
spin_lock_irq(&p->sighand->siglock);
640-
exiting = signal_group_exit(p->signal);
641-
spin_unlock_irq(&p->sighand->siglock);
642-
if (exiting)
637+
if (signal_group_exit(p->signal))
643638
continue;
644639

645640
/* Give up */

mm/page_alloc.c

Lines changed: 28 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
656656
return;
657657

658658
page_ext = lookup_page_ext(page);
659+
if (unlikely(!page_ext))
660+
return;
661+
659662
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
660663

661664
INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
673676
return;
674677

675678
page_ext = lookup_page_ext(page);
679+
if (unlikely(!page_ext))
680+
return;
681+
676682
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
677683

678684
set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
26092615
page = list_last_entry(list, struct page, lru);
26102616
else
26112617
page = list_first_entry(list, struct page, lru);
2612-
} while (page && check_new_pcp(page));
26132618

2614-
__dec_zone_state(zone, NR_ALLOC_BATCH);
2615-
list_del(&page->lru);
2616-
pcp->count--;
2619+
__dec_zone_state(zone, NR_ALLOC_BATCH);
2620+
list_del(&page->lru);
2621+
pcp->count--;
2622+
2623+
} while (check_new_pcp(page));
26172624
} else {
26182625
/*
26192626
* We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
30233030
apply_fair = false;
30243031
fair_skipped = false;
30253032
reset_alloc_batches(ac->preferred_zoneref->zone);
3033+
z = ac->preferred_zoneref;
30263034
goto zonelist_scan;
30273035
}
30283036

@@ -3596,6 +3604,17 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
35963604
*/
35973605
alloc_flags = gfp_to_alloc_flags(gfp_mask);
35983606

3607+
/*
3608+
* Reset the zonelist iterators if memory policies can be ignored.
3609+
* These allocations are high priority and system rather than user
3610+
* orientated.
3611+
*/
3612+
if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
3613+
ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3614+
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3615+
ac->high_zoneidx, ac->nodemask);
3616+
}
3617+
35993618
/* This is the last chance, in general, before the goto nopage. */
36003619
page = get_page_from_freelist(gfp_mask, order,
36013620
alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
36043623

36053624
/* Allocate without watermarks if the context allows */
36063625
if (alloc_flags & ALLOC_NO_WATERMARKS) {
3607-
/*
3608-
* Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
3609-
* the allocation is high priority and these type of
3610-
* allocations are system rather than user orientated
3611-
*/
3612-
ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
36133626
page = get_page_from_freelist(gfp_mask, order,
36143627
ALLOC_NO_WATERMARKS, ac);
36153628
if (page)
@@ -3808,7 +3821,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
38083821
/* Dirty zone balancing only done in the fast path */
38093822
ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
38103823

3811-
/* The preferred zone is used for statistics later */
3824+
/*
3825+
* The preferred zone is used for statistics but crucially it is
3826+
* also used as the starting point for the zonelist iterator. It
3827+
* may get reset for allocations that ignore memory policies.
3828+
*/
38123829
ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
38133830
ac.high_zoneidx, ac.nodemask);
38143831
if (!ac.preferred_zoneref) {

mm/page_owner.c

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,20 +55,26 @@ void __reset_page_owner(struct page *page, unsigned int order)
5555

5656
for (i = 0; i < (1 << order); i++) {
5757
page_ext = lookup_page_ext(page + i);
58+
if (unlikely(!page_ext))
59+
continue;
5860
__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
5961
}
6062
}
6163

6264
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
6365
{
6466
struct page_ext *page_ext = lookup_page_ext(page);
67+
6568
struct stack_trace trace = {
6669
.nr_entries = 0,
6770
.max_entries = ARRAY_SIZE(page_ext->trace_entries),
6871
.entries = &page_ext->trace_entries[0],
6972
.skip = 3,
7073
};
7174

75+
if (unlikely(!page_ext))
76+
return;
77+
7278
save_stack_trace(&trace);
7379

7480
page_ext->order = order;
@@ -82,13 +88,21 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
8288
void __set_page_owner_migrate_reason(struct page *page, int reason)
8389
{
8490
struct page_ext *page_ext = lookup_page_ext(page);
91+
if (unlikely(!page_ext))
92+
return;
8593

8694
page_ext->last_migrate_reason = reason;
8795
}
8896

8997
gfp_t __get_page_owner_gfp(struct page *page)
9098
{
9199
struct page_ext *page_ext = lookup_page_ext(page);
100+
if (unlikely(!page_ext))
101+
/*
102+
* The caller just returns 0 if no valid gfp
103+
* So return 0 here too.
104+
*/
105+
return 0;
92106

93107
return page_ext->gfp_mask;
94108
}
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
99113
struct page_ext *new_ext = lookup_page_ext(newpage);
100114
int i;
101115

116+
if (unlikely(!old_ext || !new_ext))
117+
return;
118+
102119
new_ext->order = old_ext->order;
103120
new_ext->gfp_mask = old_ext->gfp_mask;
104121
new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
193210
gfp_t gfp_mask = page_ext->gfp_mask;
194211
int mt = gfpflags_to_migratetype(gfp_mask);
195212

213+
if (unlikely(!page_ext)) {
214+
pr_alert("There is no page extension available.\n");
215+
return;
216+
}
217+
196218
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
197219
pr_alert("page_owner info is not active (free page?)\n");
198220
return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
251273
}
252274

253275
page_ext = lookup_page_ext(page);
276+
if (unlikely(!page_ext))
277+
continue;
254278

255279
/*
256280
* Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
317341
continue;
318342

319343
page_ext = lookup_page_ext(page);
344+
if (unlikely(!page_ext))
345+
continue;
320346

321347
/* Maybe overraping zone */
322348
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))

0 commit comments

Comments
 (0)