
Commit d02bd27

Igor Redko authored and torvalds committed
mm/page_alloc.c: calculate 'available' memory in a separate function
Add a new field, VIRTIO_BALLOON_S_AVAIL, to the virtio_balloon memory statistics protocol, corresponding to 'Available' in /proc/meminfo. It indicates to the hypervisor how far the balloon can be inflated without pushing the guest system to swap. This metric would be very useful in VM orchestration software to improve memory management of different VMs under overcommit.

This patch (of 2):

Factor out calculation of the available memory counter into a separate exportable function, in order to be able to use it in other parts of the kernel. In particular, it appears a relevant metric to report to the hypervisor via the virtio-balloon statistics interface (in a follow-up patch).

Signed-off-by: Igor Redko <[email protected]>
Signed-off-by: Denis V. Lunev <[email protected]>
Reviewed-by: Roman Kagan <[email protected]>
Cc: Michael S. Tsirkin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
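The follow-up patch is not part of this commit. As a rough, hypothetical sketch only (not the actual follow-up), the guest-side reporting in drivers/virtio/virtio_balloon.c could consume the new helper along these lines; update_stat(), pages_to_bytes() and the existing VIRTIO_BALLOON_S_* tags are the driver's current helpers and constants, while VIRTIO_BALLOON_S_AVAIL is the new tag named above:

/*
 * Abbreviated, hypothetical sketch of the follow-up change in
 * drivers/virtio/virtio_balloon.c (not part of this commit): report
 * the si_mem_available() estimate through the balloon statistics
 * virtqueue.  The existing swap/fault statistics are elided.
 */
static void update_balloon_stats(struct virtio_balloon *vb)
{
        unsigned long events[NR_VM_EVENT_ITEMS];
        struct sysinfo i;
        long available;
        int idx = 0;

        all_vm_events(events);
        si_meminfo(&i);
        available = si_mem_available();

        /* ...existing VIRTIO_BALLOON_S_SWAP_IN/OUT, faults, etc. elided... */
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
                    pages_to_bytes(i.freeram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
                    pages_to_bytes(i.totalram));
        /* New: how far the balloon can grow before the guest starts swapping. */
        update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
                    pages_to_bytes(available));
}

The real follow-up may order or name things differently; the point is only that the estimate now comes from a single exported function rather than being recomputed by each caller.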
1 parent 7eb5029 commit d02bd27

3 files changed: 45 additions, 30 deletions

fs/proc/meminfo.c

Lines changed: 1 addition & 30 deletions
@@ -29,10 +29,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	unsigned long committed;
 	long cached;
 	long available;
-	unsigned long pagecache;
-	unsigned long wmark_low = 0;
 	unsigned long pages[NR_LRU_LISTS];
-	struct zone *zone;
 	int lru;
 
 	/*
@@ -51,33 +48,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
 		pages[lru] = global_page_state(NR_LRU_BASE + lru);
 
-	for_each_zone(zone)
-		wmark_low += zone->watermark[WMARK_LOW];
-
-	/*
-	 * Estimate the amount of memory available for userspace allocations,
-	 * without causing swapping.
-	 */
-	available = i.freeram - totalreserve_pages;
-
-	/*
-	 * Not all the page cache can be freed, otherwise the system will
-	 * start swapping. Assume at least half of the page cache, or the
-	 * low watermark worth of cache, needs to stay.
-	 */
-	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
-	pagecache -= min(pagecache / 2, wmark_low);
-	available += pagecache;
-
-	/*
-	 * Part of the reclaimable slab consists of items that are in use,
-	 * and cannot be freed. Cap this estimate at the low watermark.
-	 */
-	available += global_page_state(NR_SLAB_RECLAIMABLE) -
-		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
-
-	if (available < 0)
-		available = 0;
+	available = si_mem_available();
 
 	/*
 	 * Tagged format, for easy grepping and expansion.

include/linux/mm.h

Lines changed: 1 addition & 0 deletions
@@ -1875,6 +1875,7 @@ extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(unsigned int flags);
+extern long si_mem_available(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);

mm/page_alloc.c

Lines changed: 43 additions & 0 deletions
@@ -3713,6 +3713,49 @@ static inline void show_node(struct zone *zone)
 		printk("Node %d ", zone_to_nid(zone));
 }
 
+long si_mem_available(void)
+{
+	long available;
+	unsigned long pagecache;
+	unsigned long wmark_low = 0;
+	unsigned long pages[NR_LRU_LISTS];
+	struct zone *zone;
+	int lru;
+
+	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
+		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+
+	for_each_zone(zone)
+		wmark_low += zone->watermark[WMARK_LOW];
+
+	/*
+	 * Estimate the amount of memory available for userspace allocations,
+	 * without causing swapping.
+	 */
+	available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+
+	/*
+	 * Not all the page cache can be freed, otherwise the system will
+	 * start swapping. Assume at least half of the page cache, or the
+	 * low watermark worth of cache, needs to stay.
+	 */
+	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
+	pagecache -= min(pagecache / 2, wmark_low);
+	available += pagecache;
+
+	/*
+	 * Part of the reclaimable slab consists of items that are in use,
+	 * and cannot be freed. Cap this estimate at the low watermark.
+	 */
+	available += global_page_state(NR_SLAB_RECLAIMABLE) -
+		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+
+	if (available < 0)
+		available = 0;
+	return available;
+}
+EXPORT_SYMBOL_GPL(si_mem_available);
+
 void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
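For intuition about what the new counter reports, here is a small, self-contained userspace approximation of the same heuristic built only from /proc/meminfo fields. This is an illustrative sketch: totalreserve_pages and the per-zone low watermarks are kernel-internal and not visible in /proc/meminfo, so they are treated as zero here, which makes the result an overestimate of the kernel's own MemAvailable (printed alongside for comparison).

#include <stdio.h>
#include <string.h>

/* Read a single "Key:   <value> kB" field from /proc/meminfo. */
static unsigned long long meminfo_kb(const char *key)
{
        char line[256];
        unsigned long long val = 0;
        size_t len = strlen(key);
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f)
                return 0;
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, key, len) == 0 && line[len] == ':') {
                        sscanf(line + len + 1, "%llu", &val);
                        break;
                }
        }
        fclose(f);
        return val;
}

static unsigned long long min_ull(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* wmark_low and totalreserve_pages are kernel-internal; assume 0. */
        unsigned long long wmark_low = 0;
        unsigned long long free_kb = meminfo_kb("MemFree");
        unsigned long long filecache = meminfo_kb("Active(file)") +
                                       meminfo_kb("Inactive(file)");
        unsigned long long sreclaimable = meminfo_kb("SReclaimable");
        unsigned long long available;

        /* Free memory (the kernel additionally subtracts its reserves). */
        available = free_kb;
        /* Page cache that could be dropped without pushing the system to swap. */
        available += filecache - min_ull(filecache / 2, wmark_low);
        /* The part of the reclaimable slab that could actually be freed. */
        available += sreclaimable - min_ull(sreclaimable / 2, wmark_low);

        printf("estimated available: %llu kB, kernel MemAvailable: %llu kB\n",
               available, meminfo_kb("MemAvailable"));
        return 0;
}

Build it with, for example, cc -O2 -o memavail memavail.c (memavail.c is just an illustrative file name) and compare the two figures on a live system.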
