Skip to content

Commit cc07903

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "Three fixes"

* emailed patches from Andrew Morton <[email protected]>:
  mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
  mm/memory_hotplug: don't access uninitialized memmaps in shrink_zone_span()
  Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
2 parents a6b0373 + 9a63236 commit cc07903

File tree

3 files changed

+53
-33
lines changed

3 files changed

+53
-33
lines changed

fs/ocfs2/xattr.c

Lines changed: 33 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
14901490
return loc->xl_ops->xlo_check_space(loc, xi);
14911491
}
14921492

1493+
static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
1494+
{
1495+
loc->xl_ops->xlo_add_entry(loc, name_hash);
1496+
loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
1497+
/*
1498+
* We can't leave the new entry's xe_name_offset at zero or
1499+
* add_namevalue() will go nuts. We set it to the size of our
1500+
* storage so that it can never be less than any other entry.
1501+
*/
1502+
loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
1503+
}
1504+
14931505
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
14941506
struct ocfs2_xattr_info *xi)
14951507
{
@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
21212133
if (rc)
21222134
goto out;
21232135

2124-
if (!loc->xl_entry) {
2125-
rc = -EINVAL;
2126-
goto out;
2127-
}
2128-
2129-
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
2130-
orig_value_size = loc->xl_entry->xe_value_size;
2131-
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
2132-
if (rc)
2133-
goto out;
2134-
goto alloc_value;
2135-
}
2136+
if (loc->xl_entry) {
2137+
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
2138+
orig_value_size = loc->xl_entry->xe_value_size;
2139+
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
2140+
if (rc)
2141+
goto out;
2142+
goto alloc_value;
2143+
}
21362144

2137-
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
2138-
orig_clusters = ocfs2_xa_value_clusters(loc);
2139-
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2140-
if (rc) {
2141-
mlog_errno(rc);
2142-
ocfs2_xa_cleanup_value_truncate(loc,
2143-
"overwriting",
2144-
orig_clusters);
2145-
goto out;
2145+
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
2146+
orig_clusters = ocfs2_xa_value_clusters(loc);
2147+
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
2148+
if (rc) {
2149+
mlog_errno(rc);
2150+
ocfs2_xa_cleanup_value_truncate(loc,
2151+
"overwriting",
2152+
orig_clusters);
2153+
goto out;
2154+
}
21462155
}
2147-
}
2148-
ocfs2_xa_wipe_namevalue(loc);
2156+
ocfs2_xa_wipe_namevalue(loc);
2157+
} else
2158+
ocfs2_xa_add_entry(loc, name_hash);
21492159

21502160
/*
21512161
* If we get here, we have a blank entry. Fill it. We grow our

mm/ksm.c

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node)
885885
return 0;
886886
}
887887

888-
if (WARN_ON_ONCE(page_mapped(page))) {
889-
/*
890-
* This should not happen: but if it does, just refuse to let
891-
* merge_across_nodes be switched - there is no need to panic.
892-
*/
893-
err = -EBUSY;
894-
} else {
888+
/*
889+
* Page could be still mapped if this races with __mmput() running in
890+
* between ksm_exit() and exit_mmap(). Just refuse to let
891+
* merge_across_nodes/max_page_sharing be switched.
892+
*/
893+
err = -EBUSY;
894+
if (!page_mapped(page)) {
895895
/*
896896
* The stable node did not yet appear stale to get_ksm_page(),
897897
* since that allows for an unmapped ksm page to be recognized

mm/memory_hotplug.c

Lines changed: 13 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
331331
unsigned long end_pfn)
332332
{
333333
for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
334-
if (unlikely(!pfn_valid(start_pfn)))
334+
if (unlikely(!pfn_to_online_page(start_pfn)))
335335
continue;
336336

337337
if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
356356
/* pfn is the end pfn of a memory section. */
357357
pfn = end_pfn - 1;
358358
for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
359-
if (unlikely(!pfn_valid(pfn)))
359+
if (unlikely(!pfn_to_online_page(pfn)))
360360
continue;
361361

362362
if (unlikely(pfn_to_nid(pfn) != nid))
@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
415415
*/
416416
pfn = zone_start_pfn;
417417
for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
418-
if (unlikely(!pfn_valid(pfn)))
418+
if (unlikely(!pfn_to_online_page(pfn)))
419419
continue;
420420

421421
if (page_zone(pfn_to_page(pfn)) != zone)
@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
471471
struct pglist_data *pgdat = zone->zone_pgdat;
472472
unsigned long flags;
473473

474+
#ifdef CONFIG_ZONE_DEVICE
475+
/*
476+
* Zone shrinking code cannot properly deal with ZONE_DEVICE. So
477+
* we will not try to shrink the zones - which is okay as
478+
* set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
479+
*/
480+
if (zone_idx(zone) == ZONE_DEVICE)
481+
return;
482+
#endif
483+
474484
pgdat_resize_lock(zone->zone_pgdat, &flags);
475485
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
476486
update_pgdat_span(pgdat);

0 commit comments

Comments (0)