@@ -15,18 +15,29 @@
 #include "slab.h"
 
 #ifdef CONFIG_MEMCG_KMEM
-static LIST_HEAD(list_lrus);
+static LIST_HEAD(memcg_list_lrus);
 static DEFINE_MUTEX(list_lrus_mutex);
 
+static inline bool list_lru_memcg_aware(struct list_lru *lru)
+{
+	return lru->memcg_aware;
+}
+
 static void list_lru_register(struct list_lru *lru)
 {
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	mutex_lock(&list_lrus_mutex);
-	list_add(&lru->list, &list_lrus);
+	list_add(&lru->list, &memcg_list_lrus);
 	mutex_unlock(&list_lrus_mutex);
 }
 
 static void list_lru_unregister(struct list_lru *lru)
 {
+	if (!list_lru_memcg_aware(lru))
+		return;
+
 	mutex_lock(&list_lrus_mutex);
 	list_del(&lru->list);
 	mutex_unlock(&list_lrus_mutex);
@@ -37,11 +48,6 @@ static int lru_shrinker_id(struct list_lru *lru)
 	return lru->shrinker_id;
 }
 
-static inline bool list_lru_memcg_aware(struct list_lru *lru)
-{
-	return lru->memcg_aware;
-}
-
 static inline struct list_lru_one *
 list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 {
@@ -457,9 +463,6 @@ static int memcg_update_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return 0;
-
 	for_each_node(i) {
 		if (memcg_update_list_lru_node(&lru->node[i],
 					       old_size, new_size))
@@ -482,9 +485,6 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return;
-
 	for_each_node(i)
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
@@ -497,7 +497,7 @@ int memcg_update_all_list_lrus(int new_size)
 	int old_size = memcg_nr_cache_ids;
 
 	mutex_lock(&list_lrus_mutex);
-	list_for_each_entry(lru, &list_lrus, list) {
+	list_for_each_entry(lru, &memcg_list_lrus, list) {
 		ret = memcg_update_list_lru(lru, old_size, new_size);
 		if (ret)
 			goto fail;
@@ -506,7 +506,7 @@ int memcg_update_all_list_lrus(int new_size)
 	mutex_unlock(&list_lrus_mutex);
 	return ret;
 fail:
-	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
+	list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
 		memcg_cancel_update_list_lru(lru, old_size, new_size);
 	goto out;
 }
@@ -543,9 +543,6 @@ static void memcg_drain_list_lru(struct list_lru *lru,
 {
 	int i;
 
-	if (!list_lru_memcg_aware(lru))
-		return;
-
 	for_each_node(i)
 		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
 }
@@ -555,7 +552,7 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
 	struct list_lru *lru;
 
 	mutex_lock(&list_lrus_mutex);
-	list_for_each_entry(lru, &list_lrus, list)
+	list_for_each_entry(lru, &memcg_list_lrus, list)
 		memcg_drain_list_lru(lru, src_idx, dst_memcg);
 	mutex_unlock(&list_lrus_mutex);
 }
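
The effect of the patch: non-memcg-aware lrus are filtered out once, at
list_lru_register()/list_lru_unregister() time, so memcg_list_lrus only ever
holds memcg-aware entries and the per-entry list_lru_memcg_aware() checks in
memcg_update_list_lru(), memcg_cancel_update_list_lru() and
memcg_drain_list_lru() become redundant and can be dropped. Below is a minimal
userspace sketch of that invariant -- not kernel code; the singly-linked list
and the main() harness are simplified stand-ins for struct list_head and the
real callers:

/*
 * Minimal userspace sketch, not kernel code: the types and helpers are
 * simplified stand-ins for struct list_lru and the kernel list helpers.
 * It shows the invariant the patch establishes: non-memcg-aware lrus are
 * rejected at registration, so every walker of the global list may assume
 * memcg-awareness and skip the per-entry check.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_lru {
	struct list_lru *next;		/* stand-in for struct list_head */
	bool memcg_aware;
};

static struct list_lru *memcg_list_lrus;	/* global list head */

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;			/* never enters the global list */

	lru->next = memcg_list_lrus;
	memcg_list_lrus = lru;
}

int main(void)
{
	struct list_lru a = { .memcg_aware = true };
	struct list_lru b = { .memcg_aware = false };
	struct list_lru *lru;

	list_lru_register(&a);
	list_lru_register(&b);		/* filtered out here, not in the walker */

	/* every entry is memcg-aware by construction; no check needed */
	for (lru = memcg_list_lrus; lru != NULL; lru = lru->next)
		printf("lru %p: memcg_aware=%d\n", (void *)lru, lru->memcg_aware);

	return 0;
}

A side effect of filtering at registration, visible in the first hunk: a
non-memcg-aware lru now returns before mutex_lock(), so it never takes
list_lrus_mutex at all.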