Skip to content

Commit 3eef112

Browse files
Muchun Song authored and torvalds committed
mm: list_lru: only add memcg-aware lrus to the global lru list
The non-memcg-aware lru is always skipped when traversing the global lru list, which is not efficient. We can add only the memcg-aware lrus to the global lru list instead to make traversing more efficient. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Muchun Song <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Roman Gushchin <[email protected]> Cc: Shakeel Butt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent e80216d commit 3eef112

File tree

1 file changed

+16
-19
lines changed

1 file changed

+16
-19
lines changed

mm/list_lru.c

Lines changed: 16 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -15,18 +15,29 @@
1515
#include "slab.h"
1616

1717
#ifdef CONFIG_MEMCG_KMEM
18-
static LIST_HEAD(list_lrus);
18+
static LIST_HEAD(memcg_list_lrus);
1919
static DEFINE_MUTEX(list_lrus_mutex);
2020

21+
static inline bool list_lru_memcg_aware(struct list_lru *lru)
22+
{
23+
return lru->memcg_aware;
24+
}
25+
2126
static void list_lru_register(struct list_lru *lru)
2227
{
28+
if (!list_lru_memcg_aware(lru))
29+
return;
30+
2331
mutex_lock(&list_lrus_mutex);
24-
list_add(&lru->list, &list_lrus);
32+
list_add(&lru->list, &memcg_list_lrus);
2533
mutex_unlock(&list_lrus_mutex);
2634
}
2735

2836
static void list_lru_unregister(struct list_lru *lru)
2937
{
38+
if (!list_lru_memcg_aware(lru))
39+
return;
40+
3041
mutex_lock(&list_lrus_mutex);
3142
list_del(&lru->list);
3243
mutex_unlock(&list_lrus_mutex);
@@ -37,11 +48,6 @@ static int lru_shrinker_id(struct list_lru *lru)
3748
return lru->shrinker_id;
3849
}
3950

40-
static inline bool list_lru_memcg_aware(struct list_lru *lru)
41-
{
42-
return lru->memcg_aware;
43-
}
44-
4551
static inline struct list_lru_one *
4652
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
4753
{
@@ -457,9 +463,6 @@ static int memcg_update_list_lru(struct list_lru *lru,
457463
{
458464
int i;
459465

460-
if (!list_lru_memcg_aware(lru))
461-
return 0;
462-
463466
for_each_node(i) {
464467
if (memcg_update_list_lru_node(&lru->node[i],
465468
old_size, new_size))
@@ -482,9 +485,6 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
482485
{
483486
int i;
484487

485-
if (!list_lru_memcg_aware(lru))
486-
return;
487-
488488
for_each_node(i)
489489
memcg_cancel_update_list_lru_node(&lru->node[i],
490490
old_size, new_size);
@@ -497,7 +497,7 @@ int memcg_update_all_list_lrus(int new_size)
497497
int old_size = memcg_nr_cache_ids;
498498

499499
mutex_lock(&list_lrus_mutex);
500-
list_for_each_entry(lru, &list_lrus, list) {
500+
list_for_each_entry(lru, &memcg_list_lrus, list) {
501501
ret = memcg_update_list_lru(lru, old_size, new_size);
502502
if (ret)
503503
goto fail;
@@ -506,7 +506,7 @@ int memcg_update_all_list_lrus(int new_size)
506506
mutex_unlock(&list_lrus_mutex);
507507
return ret;
508508
fail:
509-
list_for_each_entry_continue_reverse(lru, &list_lrus, list)
509+
list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
510510
memcg_cancel_update_list_lru(lru, old_size, new_size);
511511
goto out;
512512
}
@@ -543,9 +543,6 @@ static void memcg_drain_list_lru(struct list_lru *lru,
543543
{
544544
int i;
545545

546-
if (!list_lru_memcg_aware(lru))
547-
return;
548-
549546
for_each_node(i)
550547
memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
551548
}
@@ -555,7 +552,7 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
555552
struct list_lru *lru;
556553

557554
mutex_lock(&list_lrus_mutex);
558-
list_for_each_entry(lru, &list_lrus, list)
555+
list_for_each_entry(lru, &memcg_list_lrus, list)
559556
memcg_drain_list_lru(lru, src_idx, dst_memcg);
560557
mutex_unlock(&list_lrus_mutex);
561558
}

0 commit comments

Comments
 (0)