Skip to content

Commit 6cea1d5

Browse files
rgushchin authored and torvalds committed
mm: memcg/slab: unify SLAB and SLUB page accounting
Currently the page accounting code is duplicated in SLAB and SLUB internals.
Let's move it into new (un)charge_slab_page helpers in the slab_common.c
file. These helpers will be responsible for statistics (global and
memcg-aware) and memcg charging. So they are replacing direct
memcg_(un)charge_slab() calls.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Roman Gushchin <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Acked-by: Vladimir Davydov <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Andrei Vagin <[email protected]>
Cc: Qian Cai <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 49a18ea commit 6cea1d5

File tree

3 files changed

+30
-28
lines changed

3 files changed

+30
-28
lines changed

mm/slab.c

Lines changed: 3 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1360,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
13601360
int nodeid)
13611361
{
13621362
struct page *page;
1363-
int nr_pages;
13641363

13651364
flags |= cachep->allocflags;
13661365

@@ -1370,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
13701369
return NULL;
13711370
}
13721371

1373-
if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1372+
if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
13741373
__free_pages(page, cachep->gfporder);
13751374
return NULL;
13761375
}
13771376

1378-
nr_pages = (1 << cachep->gfporder);
1379-
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1380-
mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
1381-
else
1382-
mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
1383-
13841377
__SetPageSlab(page);
13851378
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
13861379
if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1395,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
13951388
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
13961389
{
13971390
int order = cachep->gfporder;
1398-
unsigned long nr_freed = (1 << order);
1399-
1400-
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1401-
mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
1402-
else
1403-
mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
14041391

14051392
BUG_ON(!PageSlab(page));
14061393
__ClearPageSlabPfmemalloc(page);
@@ -1409,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
14091396
page->mapping = NULL;
14101397

14111398
if (current->reclaim_state)
1412-
current->reclaim_state->reclaimed_slab += nr_freed;
1413-
memcg_uncharge_slab(page, order, cachep);
1399+
current->reclaim_state->reclaimed_slab += 1 << order;
1400+
uncharge_slab_page(page, order, cachep);
14141401
__free_pages(page, order);
14151402
}
14161403

mm/slab.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
205205
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
206206
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
207207

208+
static inline int cache_vmstat_idx(struct kmem_cache *s)
209+
{
210+
return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
211+
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
212+
}
213+
208214
#ifdef CONFIG_MEMCG_KMEM
209215

210216
/* List of all root caches. */
@@ -361,6 +367,25 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
361367
return page->slab_cache;
362368
}
363369

370+
static __always_inline int charge_slab_page(struct page *page,
371+
gfp_t gfp, int order,
372+
struct kmem_cache *s)
373+
{
374+
int ret = memcg_charge_slab(page, gfp, order, s);
375+
376+
if (!ret)
377+
mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
378+
379+
return ret;
380+
}
381+
382+
/*
 * Undo charge_slab_page(): decrement the NR_SLAB_* vmstat counter for
 * the page and uncharge it from its memcg (if any).  The stat update
 * is done first, before memcg_uncharge_slab() releases the page's
 * memcg association.
 */
static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
	memcg_uncharge_slab(page, order, s);
}
388+
364389
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
365390
{
366391
struct kmem_cache *cachep;

mm/slub.c

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1488,7 +1488,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
14881488
else
14891489
page = __alloc_pages_node(node, flags, order);
14901490

1491-
if (page && memcg_charge_slab(page, flags, order, s)) {
1491+
if (page && charge_slab_page(page, flags, order, s)) {
14921492
__free_pages(page, order);
14931493
page = NULL;
14941494
}
@@ -1681,11 +1681,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
16811681
if (!page)
16821682
return NULL;
16831683

1684-
mod_lruvec_page_state(page,
1685-
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1686-
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1687-
1 << oo_order(oo));
1688-
16891684
inc_slabs_node(s, page_to_nid(page), page->objects);
16901685

16911686
return page;
@@ -1719,18 +1714,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
17191714
check_object(s, page, p, SLUB_RED_INACTIVE);
17201715
}
17211716

1722-
mod_lruvec_page_state(page,
1723-
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1724-
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1725-
-pages);
1726-
17271717
__ClearPageSlabPfmemalloc(page);
17281718
__ClearPageSlab(page);
17291719

17301720
page->mapping = NULL;
17311721
if (current->reclaim_state)
17321722
current->reclaim_state->reclaimed_slab += pages;
1733-
memcg_uncharge_slab(page, order, s);
1723+
uncharge_slab_page(page, order, s);
17341724
__free_pages(page, order);
17351725
}
17361726

0 commit comments

Comments
 (0)