Skip to content

Commit 0a432dc

Browse files
Yang Shi authored and torvalds committed
mm: shrinker: make shrinker not depend on memcg kmem
Currently shrinker is just allocated and can work when memcg kmem is enabled. But, THP deferred split shrinker is not slab shrinker, it doesn't make too much sense to have such shrinker depend on memcg kmem. It should be able to reclaim THP even though memcg kmem is disabled. Introduce a new shrinker flag, SHRINKER_NONSLAB, for non-slab shrinker. When memcg kmem is disabled, just such shrinkers can be called in shrinking memcg slab. [[email protected]: add comment] Link: http://lkml.kernel.org/r/[email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Yang Shi <[email protected]> Acked-by: Kirill A. Shutemov <[email protected]> Reviewed-by: Kirill Tkhai <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Michal Hocko <[email protected]> Cc: "Kirill A . Shutemov" <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Shakeel Butt <[email protected]> Cc: David Rientjes <[email protected]> Cc: Qian Cai <[email protected]> Cc: Vladimir Davydov <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 7ae8853 commit 0a432dc

File tree

4 files changed

+49
-46
lines changed

4 files changed

+49
-46
lines changed

include/linux/memcontrol.h

Lines changed: 11 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -128,9 +128,8 @@ struct mem_cgroup_per_node {
128128

129129
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
130130

131-
#ifdef CONFIG_MEMCG_KMEM
132131
struct memcg_shrinker_map __rcu *shrinker_map;
133-
#endif
132+
134133
struct rb_node tree_node; /* RB tree node */
135134
unsigned long usage_in_excess;/* Set to the value by which */
136135
/* the soft limit is exceeded*/
@@ -1311,6 +1310,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
13111310
} while ((memcg = parent_mem_cgroup(memcg)));
13121311
return false;
13131312
}
1313+
1314+
extern int memcg_expand_shrinker_maps(int new_id);
1315+
1316+
extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1317+
int nid, int shrinker_id);
13141318
#else
13151319
#define mem_cgroup_sockets_enabled 0
13161320
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1319,6 +1323,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
13191323
{
13201324
return false;
13211325
}
1326+
1327+
static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1328+
int nid, int shrinker_id)
1329+
{
1330+
}
13221331
#endif
13231332

13241333
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
@@ -1390,10 +1399,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
13901399
return memcg ? memcg->kmemcg_id : -1;
13911400
}
13921401

1393-
extern int memcg_expand_shrinker_maps(int new_id);
1394-
1395-
extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1396-
int nid, int shrinker_id);
13971402
#else
13981403

13991404
static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1435,8 +1440,6 @@ static inline void memcg_put_cache_ids(void)
14351440
{
14361441
}
14371442

1438-
static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1439-
int nid, int shrinker_id) { }
14401443
#endif /* CONFIG_MEMCG_KMEM */
14411444

14421445
#endif /* _LINUX_MEMCONTROL_H */

include/linux/shrinker.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ struct shrinker {
6969

7070
/* These are for internal use */
7171
struct list_head list;
72-
#ifdef CONFIG_MEMCG_KMEM
72+
#ifdef CONFIG_MEMCG
7373
/* ID in shrinker_idr */
7474
int id;
7575
#endif
@@ -81,6 +81,11 @@ struct shrinker {
8181
/* Flags */
8282
#define SHRINKER_NUMA_AWARE (1 << 0)
8383
#define SHRINKER_MEMCG_AWARE (1 << 1)
84+
/*
85+
* It just makes sense when the shrinker is also MEMCG_AWARE for now,
86+
* non-MEMCG_AWARE shrinker should not have this flag set.
87+
*/
88+
#define SHRINKER_NONSLAB (1 << 2)
8489

8590
extern int prealloc_shrinker(struct shrinker *shrinker);
8691
extern void register_shrinker_prepared(struct shrinker *shrinker);

mm/memcontrol.c

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -318,6 +318,7 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
318318
EXPORT_SYMBOL(memcg_kmem_enabled_key);
319319

320320
struct workqueue_struct *memcg_kmem_cache_wq;
321+
#endif
321322

322323
static int memcg_shrinker_map_size;
323324
static DEFINE_MUTEX(memcg_shrinker_map_mutex);
@@ -441,14 +442,6 @@ void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
441442
}
442443
}
443444

444-
#else /* CONFIG_MEMCG_KMEM */
445-
static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
446-
{
447-
return 0;
448-
}
449-
static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
450-
#endif /* CONFIG_MEMCG_KMEM */
451-
452445
/**
453446
* mem_cgroup_css_from_page - css of the memcg associated with a page
454447
* @page: page of interest

mm/vmscan.c

Lines changed: 31 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -171,11 +171,22 @@ int vm_swappiness = 60;
171171
*/
172172
unsigned long vm_total_pages;
173173

174+
static void set_task_reclaim_state(struct task_struct *task,
175+
struct reclaim_state *rs)
176+
{
177+
/* Check for an overwrite */
178+
WARN_ON_ONCE(rs && task->reclaim_state);
179+
180+
/* Check for the nulling of an already-nulled member */
181+
WARN_ON_ONCE(!rs && !task->reclaim_state);
182+
183+
task->reclaim_state = rs;
184+
}
185+
174186
static LIST_HEAD(shrinker_list);
175187
static DECLARE_RWSEM(shrinker_rwsem);
176188

177-
#ifdef CONFIG_MEMCG_KMEM
178-
189+
#ifdef CONFIG_MEMCG
179190
/*
180191
* We allow subsystems to populate their shrinker-related
181192
* LRU lists before register_shrinker_prepared() is called
@@ -227,30 +238,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
227238
idr_remove(&shrinker_idr, id);
228239
up_write(&shrinker_rwsem);
229240
}
230-
#else /* CONFIG_MEMCG_KMEM */
231-
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
232-
{
233-
return 0;
234-
}
235241

236-
static void unregister_memcg_shrinker(struct shrinker *shrinker)
237-
{
238-
}
239-
#endif /* CONFIG_MEMCG_KMEM */
240-
241-
static void set_task_reclaim_state(struct task_struct *task,
242-
struct reclaim_state *rs)
243-
{
244-
/* Check for an overwrite */
245-
WARN_ON_ONCE(rs && task->reclaim_state);
246-
247-
/* Check for the nulling of an already-nulled member */
248-
WARN_ON_ONCE(!rs && !task->reclaim_state);
249-
250-
task->reclaim_state = rs;
251-
}
252-
253-
#ifdef CONFIG_MEMCG
254242
static bool global_reclaim(struct scan_control *sc)
255243
{
256244
return !sc->target_mem_cgroup;
@@ -305,6 +293,15 @@ static bool memcg_congested(pg_data_t *pgdat,
305293

306294
}
307295
#else
296+
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
297+
{
298+
return 0;
299+
}
300+
301+
static void unregister_memcg_shrinker(struct shrinker *shrinker)
302+
{
303+
}
304+
308305
static bool global_reclaim(struct scan_control *sc)
309306
{
310307
return true;
@@ -591,15 +588,15 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
591588
return freed;
592589
}
593590

594-
#ifdef CONFIG_MEMCG_KMEM
591+
#ifdef CONFIG_MEMCG
595592
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
596593
struct mem_cgroup *memcg, int priority)
597594
{
598595
struct memcg_shrinker_map *map;
599596
unsigned long ret, freed = 0;
600597
int i;
601598

602-
if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
599+
if (!mem_cgroup_online(memcg))
603600
return 0;
604601

605602
if (!down_read_trylock(&shrinker_rwsem))
@@ -625,6 +622,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
625622
continue;
626623
}
627624

625+
/* Call non-slab shrinkers even though kmem is disabled */
626+
if (!memcg_kmem_enabled() &&
627+
!(shrinker->flags & SHRINKER_NONSLAB))
628+
continue;
629+
628630
ret = do_shrink_slab(&sc, shrinker, priority);
629631
if (ret == SHRINK_EMPTY) {
630632
clear_bit(i, map->map);
@@ -661,13 +663,13 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
661663
up_read(&shrinker_rwsem);
662664
return freed;
663665
}
664-
#else /* CONFIG_MEMCG_KMEM */
666+
#else /* CONFIG_MEMCG */
665667
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
666668
struct mem_cgroup *memcg, int priority)
667669
{
668670
return 0;
669671
}
670-
#endif /* CONFIG_MEMCG_KMEM */
672+
#endif /* CONFIG_MEMCG */
671673

672674
/**
673675
* shrink_slab - shrink slab caches

0 commit comments

Comments
 (0)