Skip to content

Commit 0cad3ff

Browse files
committed
Merge branch 'akpm' (Fixes from Andrew)
Merge misc fixes from Andrew Morton. * emailed patches from Andrew Morton <[email protected]>: (12 patches) revert "mm: fix-up zone present pages" tmpfs: change final i_blocks BUG to WARNING tmpfs: fix shmem_getpage_gfp() VM_BUG_ON mm: highmem: don't treat PKMAP_ADDR(LAST_PKMAP) as a highmem address mm: revert "mm: vmscan: scale number of pages reclaimed by reclaim/compaction based on failures" rapidio: fix kernel-doc warnings swapfile: fix name leak in swapoff memcg: fix hotplugged memory zone oops mips, arc: fix build failure memcg: oom: fix totalpages calculation for memory.swappiness==0 mm: fix build warning for uninitialized value mm: add anon_vma_lock to validate_mm()
2 parents 1d567e1 + 5576646 commit 0cad3ff

File tree

19 files changed

+86
-120
lines changed

19 files changed

+86
-120
lines changed

Documentation/cgroups/memory.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -466,6 +466,10 @@ Note:
466466
5.3 swappiness
467467

468468
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
469+
Please note that unlike the global swappiness, memcg knob set to 0
470+
really prevents any swapping even if there is swap storage
471+
available. This might lead to memcg OOM killer if there are no file
472+
pages to reclaim.
469473

470474
Following cgroups' swappiness can't be changed.
471475
- root cgroup (uses /proc/sys/vm/swappiness).

arch/ia64/mm/init.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -637,7 +637,6 @@ mem_init (void)
637637

638638
high_memory = __va(max_low_pfn * PAGE_SIZE);
639639

640-
reset_zone_present_pages();
641640
for_each_online_pgdat(pgdat)
642641
if (pgdat->bdata->node_bootmem_map)
643642
totalram_pages += free_all_bootmem_node(pgdat);

arch/mips/fw/arc/misc.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
*/
1212
#include <linux/init.h>
1313
#include <linux/kernel.h>
14+
#include <linux/irqflags.h>
1415

1516
#include <asm/bcache.h>
1617

drivers/rapidio/rio.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
401401
/**
402402
* rio_map_inb_region -- Map inbound memory region.
403403
* @mport: Master port.
404-
* @lstart: physical address of memory region to be mapped
404+
* @local: physical address of memory region to be mapped
405405
* @rbase: RIO base address assigned to this window
406406
* @size: Size of the memory region
407407
* @rflags: Flags for mapping.

include/linux/mm.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
16841684
static inline bool page_is_guard(struct page *page) { return false; }
16851685
#endif /* CONFIG_DEBUG_PAGEALLOC */
16861686

1687-
extern void reset_zone_present_pages(void);
1688-
extern void fixup_zone_present_pages(int nid, unsigned long start_pfn,
1689-
unsigned long end_pfn);
1690-
16911687
#endif /* __KERNEL__ */
16921688
#endif /* _LINUX_MM_H */

include/linux/mmzone.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
752752
unsigned long size,
753753
enum memmap_context context);
754754

755-
extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
755+
extern void lruvec_init(struct lruvec *lruvec);
756756

757757
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
758758
{

include/linux/rio.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -275,9 +275,11 @@ struct rio_id_table {
275275
* struct rio_net - RIO network info
276276
* @node: Node in global list of RIO networks
277277
* @devices: List of devices in this network
278+
* @switches: List of switches in this network
278279
* @mports: List of master ports accessing this network
279280
* @hport: Default port for accessing this network
280281
* @id: RIO network ID
282+
* @destid_table: destID allocation table
281283
*/
282284
struct rio_net {
283285
struct list_head node; /* node in list of networks */

mm/bootmem.c

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
198198
int order = ilog2(BITS_PER_LONG);
199199

200200
__free_pages_bootmem(pfn_to_page(start), order);
201-
fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
202-
start, start + BITS_PER_LONG);
203201
count += BITS_PER_LONG;
204202
start += BITS_PER_LONG;
205203
} else {
@@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
210208
if (vec & 1) {
211209
page = pfn_to_page(start + off);
212210
__free_pages_bootmem(page, 0);
213-
fixup_zone_present_pages(
214-
page_to_nid(page),
215-
start + off, start + off + 1);
216211
count++;
217212
}
218213
vec >>= 1;
@@ -226,11 +221,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
226221
pages = bdata->node_low_pfn - bdata->node_min_pfn;
227222
pages = bootmem_bootmap_pages(pages);
228223
count += pages;
229-
while (pages--) {
230-
fixup_zone_present_pages(page_to_nid(page),
231-
page_to_pfn(page), page_to_pfn(page) + 1);
224+
while (pages--)
232225
__free_pages_bootmem(page++, 0);
233-
}
234226

235227
bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
236228

mm/highmem.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ struct page *kmap_to_page(void *vaddr)
9898
{
9999
unsigned long addr = (unsigned long)vaddr;
100100

101-
if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
101+
if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
102102
int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
103103
return pte_page(pkmap_page_table[i]);
104104
}

mm/memcontrol.c

Lines changed: 50 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
10551055
struct mem_cgroup *memcg)
10561056
{
10571057
struct mem_cgroup_per_zone *mz;
1058+
struct lruvec *lruvec;
10581059

1059-
if (mem_cgroup_disabled())
1060-
return &zone->lruvec;
1060+
if (mem_cgroup_disabled()) {
1061+
lruvec = &zone->lruvec;
1062+
goto out;
1063+
}
10611064

10621065
mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1063-
return &mz->lruvec;
1066+
lruvec = &mz->lruvec;
1067+
out:
1068+
/*
1069+
* Since a node can be onlined after the mem_cgroup was created,
1070+
* we have to be prepared to initialize lruvec->zone here;
1071+
* and if offlined then reonlined, we need to reinitialize it.
1072+
*/
1073+
if (unlikely(lruvec->zone != zone))
1074+
lruvec->zone = zone;
1075+
return lruvec;
10641076
}
10651077

10661078
/*
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
10871099
struct mem_cgroup_per_zone *mz;
10881100
struct mem_cgroup *memcg;
10891101
struct page_cgroup *pc;
1102+
struct lruvec *lruvec;
10901103

1091-
if (mem_cgroup_disabled())
1092-
return &zone->lruvec;
1104+
if (mem_cgroup_disabled()) {
1105+
lruvec = &zone->lruvec;
1106+
goto out;
1107+
}
10931108

10941109
pc = lookup_page_cgroup(page);
10951110
memcg = pc->mem_cgroup;
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
11071122
pc->mem_cgroup = memcg = root_mem_cgroup;
11081123

11091124
mz = page_cgroup_zoneinfo(memcg, page);
1110-
return &mz->lruvec;
1125+
lruvec = &mz->lruvec;
1126+
out:
1127+
/*
1128+
* Since a node can be onlined after the mem_cgroup was created,
1129+
* we have to be prepared to initialize lruvec->zone here;
1130+
* and if offlined then reonlined, we need to reinitialize it.
1131+
*/
1132+
if (unlikely(lruvec->zone != zone))
1133+
lruvec->zone = zone;
1134+
return lruvec;
11111135
}
11121136

11131137
/**
@@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
14521476
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
14531477
{
14541478
u64 limit;
1455-
u64 memsw;
14561479

14571480
limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1458-
limit += total_swap_pages << PAGE_SHIFT;
14591481

1460-
memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
14611482
/*
1462-
* If memsw is finite and limits the amount of swap space available
1463-
* to this memcg, return that limit.
1483+
* Do not consider swap space if we cannot swap due to swappiness
14641484
*/
1465-
return min(limit, memsw);
1485+
if (mem_cgroup_swappiness(memcg)) {
1486+
u64 memsw;
1487+
1488+
limit += total_swap_pages << PAGE_SHIFT;
1489+
memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1490+
1491+
/*
1492+
* If memsw is finite and limits the amount of swap space
1493+
* available to this memcg, return that limit.
1494+
*/
1495+
limit = min(limit, memsw);
1496+
}
1497+
1498+
return limit;
14661499
}
14671500

14681501
void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
36883721
static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
36893722
int node, int zid, enum lru_list lru)
36903723
{
3691-
struct mem_cgroup_per_zone *mz;
3724+
struct lruvec *lruvec;
36923725
unsigned long flags, loop;
36933726
struct list_head *list;
36943727
struct page *busy;
36953728
struct zone *zone;
36963729

36973730
zone = &NODE_DATA(node)->node_zones[zid];
3698-
mz = mem_cgroup_zoneinfo(memcg, node, zid);
3699-
list = &mz->lruvec.lists[lru];
3731+
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
3732+
list = &lruvec->lists[lru];
37003733

3701-
loop = mz->lru_size[lru];
3734+
loop = mem_cgroup_get_lru_size(lruvec, lru);
37023735
/* give some margin against EBUSY etc...*/
37033736
loop += 256;
37043737
busy = NULL;
@@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
47364769

47374770
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
47384771
mz = &pn->zoneinfo[zone];
4739-
lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
4772+
lruvec_init(&mz->lruvec);
47404773
mz->usage_in_excess = 0;
47414774
mz->on_tree = false;
47424775
mz->memcg = memcg;

mm/memory.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2527,9 +2527,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
25272527
int ret = 0;
25282528
int page_mkwrite = 0;
25292529
struct page *dirty_page = NULL;
2530-
unsigned long mmun_start; /* For mmu_notifiers */
2531-
unsigned long mmun_end; /* For mmu_notifiers */
2532-
bool mmun_called = false; /* For mmu_notifiers */
2530+
unsigned long mmun_start = 0; /* For mmu_notifiers */
2531+
unsigned long mmun_end = 0; /* For mmu_notifiers */
25332532

25342533
old_page = vm_normal_page(vma, address, orig_pte);
25352534
if (!old_page) {
@@ -2708,8 +2707,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
27082707
goto oom_free_new;
27092708

27102709
mmun_start = address & PAGE_MASK;
2711-
mmun_end = (address & PAGE_MASK) + PAGE_SIZE;
2712-
mmun_called = true;
2710+
mmun_end = mmun_start + PAGE_SIZE;
27132711
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
27142712

27152713
/*
@@ -2778,7 +2776,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
27782776
page_cache_release(new_page);
27792777
unlock:
27802778
pte_unmap_unlock(page_table, ptl);
2781-
if (mmun_called)
2779+
if (mmun_end > mmun_start)
27822780
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
27832781
if (old_page) {
27842782
/*

mm/memory_hotplug.c

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,6 @@ static void get_page_bootmem(unsigned long info, struct page *page,
106106
void __ref put_page_bootmem(struct page *page)
107107
{
108108
unsigned long type;
109-
struct zone *zone;
110109

111110
type = (unsigned long) page->lru.next;
112111
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -117,12 +116,6 @@ void __ref put_page_bootmem(struct page *page)
117116
set_page_private(page, 0);
118117
INIT_LIST_HEAD(&page->lru);
119118
__free_pages_bootmem(page, 0);
120-
121-
zone = page_zone(page);
122-
zone_span_writelock(zone);
123-
zone->present_pages++;
124-
zone_span_writeunlock(zone);
125-
totalram_pages++;
126119
}
127120

128121
}

mm/mmap.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -334,8 +334,10 @@ void validate_mm(struct mm_struct *mm)
334334
struct vm_area_struct *vma = mm->mmap;
335335
while (vma) {
336336
struct anon_vma_chain *avc;
337+
vma_lock_anon_vma(vma);
337338
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
338339
anon_vma_interval_tree_verify(avc);
340+
vma_unlock_anon_vma(vma);
339341
vma = vma->vm_next;
340342
i++;
341343
}

mm/mmzone.c

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -87,16 +87,12 @@ int memmap_valid_within(unsigned long pfn,
8787
}
8888
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
8989

90-
void lruvec_init(struct lruvec *lruvec, struct zone *zone)
90+
void lruvec_init(struct lruvec *lruvec)
9191
{
9292
enum lru_list lru;
9393

9494
memset(lruvec, 0, sizeof(struct lruvec));
9595

9696
for_each_lru(lru)
9797
INIT_LIST_HEAD(&lruvec->lists[lru]);
98-
99-
#ifdef CONFIG_MEMCG
100-
lruvec->zone = zone;
101-
#endif
10298
}

mm/nobootmem.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -116,8 +116,6 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
116116
return 0;
117117

118118
__free_pages_memory(start_pfn, end_pfn);
119-
fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT),
120-
start_pfn, end_pfn);
121119

122120
return end_pfn - start_pfn;
123121
}
@@ -128,7 +126,6 @@ unsigned long __init free_low_memory_core_early(int nodeid)
128126
phys_addr_t start, end, size;
129127
u64 i;
130128

131-
reset_zone_present_pages();
132129
for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
133130
count += __free_memory_core(start, end);
134131

mm/page_alloc.c

Lines changed: 1 addition & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -4505,7 +4505,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
45054505
zone->zone_pgdat = pgdat;
45064506

45074507
zone_pcp_init(zone);
4508-
lruvec_init(&zone->lruvec, zone);
4508+
lruvec_init(&zone->lruvec);
45094509
if (!size)
45104510
continue;
45114511

@@ -6098,37 +6098,3 @@ void dump_page(struct page *page)
60986098
dump_page_flags(page->flags);
60996099
mem_cgroup_print_bad_page(page);
61006100
}
6101-
6102-
/* reset zone->present_pages */
6103-
void reset_zone_present_pages(void)
6104-
{
6105-
struct zone *z;
6106-
int i, nid;
6107-
6108-
for_each_node_state(nid, N_HIGH_MEMORY) {
6109-
for (i = 0; i < MAX_NR_ZONES; i++) {
6110-
z = NODE_DATA(nid)->node_zones + i;
6111-
z->present_pages = 0;
6112-
}
6113-
}
6114-
}
6115-
6116-
/* calculate zone's present pages in buddy system */
6117-
void fixup_zone_present_pages(int nid, unsigned long start_pfn,
6118-
unsigned long end_pfn)
6119-
{
6120-
struct zone *z;
6121-
unsigned long zone_start_pfn, zone_end_pfn;
6122-
int i;
6123-
6124-
for (i = 0; i < MAX_NR_ZONES; i++) {
6125-
z = NODE_DATA(nid)->node_zones + i;
6126-
zone_start_pfn = z->zone_start_pfn;
6127-
zone_end_pfn = zone_start_pfn + z->spanned_pages;
6128-
6129-
/* if the two regions intersect */
6130-
if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn))
6131-
z->present_pages += min(end_pfn, zone_end_pfn) -
6132-
max(start_pfn, zone_start_pfn);
6133-
}
6134-
}

0 commit comments

Comments
 (0)