
Commit 16cb0ec

Tobin C. Harding authored and torvalds committed
slab: use slab_list instead of lru
Currently we use the page->lru list for maintaining lists of slabs.  We have
a list in the page structure (slab_list) that can be used for this purpose.
Doing so makes the code cleaner since we are not overloading the lru list.

Use the slab_list instead of the lru list for maintaining lists of slabs.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Tobin C. Harding <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Pekka Enberg <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 916ac05 · commit 16cb0ec
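For context: page->lru and page->slab_list are members of the same union inside struct page, so both names refer to the same storage and the conversion changes no memory layout, only which name the slab code uses. A trimmed sketch of the relevant part of include/linux/mm_types.h from this era (most fields elided for brevity):

struct page {
	unsigned long flags;
	union {
		struct {	/* Page cache and anonymous pages */
			struct list_head lru;
			/* ... */
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* Partial pages (SLUB) */
					struct page *next;
					/* ... */
				};
			};
			struct kmem_cache *slab_cache;
			/* ... */
		};
		/* ... */
	};
	/* ... */
};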

File tree

1 file changed: +25 -24 lines


mm/slab.c

Lines changed: 25 additions & 24 deletions
@@ -1674,8 +1674,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
 
-	list_for_each_entry_safe(page, n, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, n, list, slab_list) {
+		list_del(&page->slab_list);
 		slab_destroy(cachep, page);
 	}
 }
@@ -2231,8 +2231,8 @@ static int drain_freelist(struct kmem_cache *cache,
 			goto out;
 		}
 
-		page = list_entry(p, struct page, lru);
-		list_del(&page->lru);
+		page = list_entry(p, struct page, slab_list);
+		list_del(&page->slab_list);
 		n->free_slabs--;
 		n->total_slabs--;
 		/*
@@ -2691,13 +2691,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	if (!page)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
+	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
-		list_add_tail(&page->lru, &(n->slabs_free));
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
 		fixup_slab_list(cachep, n, page, &list);
@@ -2806,9 +2806,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 				void **list)
 {
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (page->active == cachep->num) {
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
@@ -2822,7 +2822,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 			page->freelist = NULL;
 		}
 	} else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2845,20 +2845,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (!page->active) {
-		list_add_tail(&page->lru, &n->slabs_free);
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->lru, &n->slabs_partial);
+		list_add_tail(&page->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, lru) {
+	list_for_each_entry(page, &n->slabs_partial, slab_list) {
 		if (!PageSlabPfmemalloc(page))
 			return page;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, lru) {
+	list_for_each_entry(page, &n->slabs_free, slab_list) {
 		if (!PageSlabPfmemalloc(page)) {
 			n->free_slabs--;
 			return page;
@@ -2873,11 +2873,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 	struct page *page;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+					slab_list);
 	if (!page) {
 		n->free_touched = 1;
 		page = list_first_entry_or_null(&n->slabs_free, struct page,
-						lru);
+						slab_list);
 		if (page)
 			n->free_slabs--;
 	}
@@ -3378,29 +3379,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		objp = objpp[i];
 
 		page = virt_to_head_page(objp);
-		list_del(&page->lru);
+		list_del(&page->slab_list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
-			list_add(&page->lru, &n->slabs_free);
+			list_add(&page->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, lru);
-		list_move(&page->lru, list);
+		page = list_last_entry(&n->slabs_free, struct page, slab_list);
+		list_move(&page->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3438,7 +3439,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 		int i = 0;
 		struct page *page;
 
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, slab_list) {
 			BUG_ON(page->active);
 
 			i++;
@@ -4302,9 +4303,9 @@ static int leaks_show(struct seq_file *m, void *p)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, slab_list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, slab_list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}
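Because slab_list and lru alias the same bytes, the patch generates identical object code; the gain is purely in readability. A hypothetical compile-time check, not part of this commit, that would pin down the invariant the change relies on:

/*
 * Hypothetical sanity check: fail the build if the two list heads
 * ever stop sharing the same offset inside struct page.
 */
#include <linux/build_bug.h>
#include <linux/mm_types.h>
#include <linux/stddef.h>

static inline void assert_slab_list_aliases_lru(void)
{
	BUILD_BUG_ON(offsetof(struct page, slab_list) !=
		     offsetof(struct page, lru));
}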
