
Commit fd25a9e

shakeelb authored and torvalds committed
memcg: unify memcg stat flushing
The memcg stats can be flushed in multiple contexts and potentially in parallel too. For example, multiple parallel userspace readers of memcg stats will contend on the rstat locks with each other. There is no need for that: we just need one flusher and everyone else can benefit.

In addition, after commit aa48e47 ("memcg: infrastructure to flush memcg stats") the kernel periodically flushes the memcg stats from the root, so the other flushers will potentially have much less work to do.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Shakeel Butt <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Michal Koutný" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 11192d9 commit fd25a9e
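
For illustration, here is a minimal userspace sketch (not kernel code) of the pattern this commit unifies around: whichever caller wins a trylock performs the expensive flush, and every concurrent caller returns immediately and benefits from the winner's work. The names flush_stats(), do_expensive_flush(), stats_lock and pending_updates are hypothetical stand-ins for mem_cgroup_flush_stats(), the rstat flush, stats_flush_lock and stats_flush_threshold; pthread primitives stand in for kernel spinlocks.

/* Minimal userspace sketch of the "single flusher" pattern.
 * Build with: cc -pthread single_flusher.c
 * All names here are illustrative stand-ins, not kernel APIs. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_long pending_updates;     /* analogue of stats_flush_threshold */

static void do_expensive_flush(void)
{
        /* stands in for the rstat flush walking the cgroup tree */
        usleep(1000);
}

static void flush_stats(void)
{
        /* Only the caller that wins the trylock flushes; everyone else
         * returns immediately instead of queueing up on the lock. */
        if (pthread_mutex_trylock(&stats_lock) != 0)
                return;

        do_expensive_flush();
        atomic_store(&pending_updates, 0);
        pthread_mutex_unlock(&stats_lock);
}

static void *reader(void *arg)
{
        (void)arg;
        /* Every stats reader goes through the same helper, mirroring how
         * the commit routes all readers through mem_cgroup_flush_stats(). */
        flush_stats();
        return NULL;
}

int main(void)
{
        pthread_t threads[8];

        atomic_store(&pending_updates, 123);

        for (int i = 0; i < 8; i++)
                pthread_create(&threads[i], NULL, reader, NULL);
        for (int i = 0; i < 8; i++)
                pthread_join(threads[i], NULL);

        printf("pending updates after flush: %ld\n",
               atomic_load(&pending_updates));
        return 0;
}

Note that the kernel version in the diff below additionally switches from spin_trylock()/spin_unlock() to spin_trylock_irqsave()/spin_unlock_irqrestore(), since the now-shared flusher can be reached from interrupt-safe contexts such as mem_cgroup_threshold() (see the comment removed in the mem_cgroup_usage() hunk).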

File tree: 1 file changed, +10 -9 lines


mm/memcontrol.c

Lines changed: 10 additions & 9 deletions
@@ -660,12 +660,14 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg)
 
 static void __mem_cgroup_flush_stats(void)
 {
-        if (!spin_trylock(&stats_flush_lock))
+        unsigned long flag;
+
+        if (!spin_trylock_irqsave(&stats_flush_lock, flag))
                 return;
 
         cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
         atomic_set(&stats_flush_threshold, 0);
-        spin_unlock(&stats_flush_lock);
+        spin_unlock_irqrestore(&stats_flush_lock, flag);
 }
 
 void mem_cgroup_flush_stats(void)
@@ -1461,7 +1463,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
          *
          * Current memory state:
          */
-        cgroup_rstat_flush(memcg->css.cgroup);
+        mem_cgroup_flush_stats();
 
         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
                 u64 size;
@@ -3565,8 +3567,7 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
         unsigned long val;
 
         if (mem_cgroup_is_root(memcg)) {
-                /* mem_cgroup_threshold() calls here from irqsafe context */
-                cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
+                mem_cgroup_flush_stats();
                 val = memcg_page_state(memcg, NR_FILE_PAGES) +
                         memcg_page_state(memcg, NR_ANON_MAPPED);
                 if (swap)
@@ -3947,7 +3948,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
         int nid;
         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
-        cgroup_rstat_flush(memcg->css.cgroup);
+        mem_cgroup_flush_stats();
 
         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
                 seq_printf(m, "%s=%lu", stat->name,
@@ -4019,7 +4020,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 
         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 
-        cgroup_rstat_flush(memcg->css.cgroup);
+        mem_cgroup_flush_stats();
 
         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
                 unsigned long nr;
@@ -4522,7 +4523,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
         struct mem_cgroup *parent;
 
-        cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
+        mem_cgroup_flush_stats();
 
         *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
         *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
@@ -6405,7 +6406,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
         int i;
         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
-        cgroup_rstat_flush(memcg->css.cgroup);
+        mem_cgroup_flush_stats();
 
         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
                 int nid;
