
Commit 916ac05

Tobin C. Harding authored and torvalds committed
slub: use slab_list instead of lru
Currently we use the page->lru list for maintaining lists of slabs.  We
have a list in the page structure (slab_list) that can be used for this
purpose.  Doing so makes the code cleaner since we are not overloading
the lru list.

Use the slab_list instead of the lru list for maintaining lists of
slabs.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Tobin C. Harding <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Pekka Enberg <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 6dfd1b6 commit 916ac05
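
For background on why this is purely a naming cleanup: slab_list and lru live in the same top-level union of struct page, so they alias the same storage and the switch changes only which name the slab code uses. The sketch below is a simplified excerpt of that layout, roughly as it appears in include/linux/mm_types.h around this kernel version; most fields, comments, and #ifdefs are omitted, so treat it as illustrative rather than the authoritative definition.

/*
 * Simplified, illustrative excerpt of struct page (not the full
 * definition).  slab_list and lru sit in the same union, so moving
 * SLUB from page->lru to page->slab_list only changes which name is
 * used for the same storage.
 */
struct page {
	unsigned long flags;
	union {
		struct {			/* Page cache and anonymous pages */
			struct list_head lru;
			/* ... mapping, index, private ... */
		};
		struct {			/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* partial pages */
					struct page *next;
					int pages;
					int pobjects;
				};
			};
			struct kmem_cache *slab_cache;
			void *freelist;		/* first free object */
			/* ... counters: inuse, objects, frozen ... */
		};
		/* ... other users of this union ... */
	};
	/* ... */
};

Because the two names alias, every hunk below is a one-word substitution at list_add()/list_del()/list_move()/list_for_each_entry() call sites; the final argument of list_for_each_entry() simply names which struct list_head member inside struct page to walk.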

1 file changed: +20 −20 lines

mm/slub.c

Lines changed: 20 additions & 20 deletions
@@ -1014,7 +1014,7 @@ static void add_full(struct kmem_cache *s,
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_add(&page->lru, &n->full);
+	list_add(&page->slab_list, &n->full);
 }
 
 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
@@ -1023,7 +1023,7 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1764,9 +1764,9 @@ __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
-		list_add_tail(&page->lru, &n->partial);
+		list_add_tail(&page->slab_list, &n->partial);
 	else
-		list_add(&page->lru, &n->partial);
+		list_add(&page->slab_list, &n->partial);
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
@@ -1780,7 +1780,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	n->nr_partial--;
 }
 
@@ -1854,7 +1854,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	list_for_each_entry_safe(page, page2, &n->partial, lru) {
+	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
 		if (!pfmemalloc_match(page, flags))
@@ -2398,7 +2398,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	struct page *page;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
+	list_for_each_entry(page, &n->partial, slab_list)
 		x += get_count(page);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
@@ -3696,18 +3696,18 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	BUG_ON(irqs_disabled());
 	spin_lock_irq(&n->list_lock);
-	list_for_each_entry_safe(page, h, &n->partial, lru) {
+	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
 		if (!page->inuse) {
 			remove_partial(n, page);
-			list_add(&page->lru, &discard);
+			list_add(&page->slab_list, &discard);
 		} else {
 			list_slab_objects(s, page,
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
 
-	list_for_each_entry_safe(page, h, &discard, lru)
+	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
 }
 
@@ -3987,7 +3987,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		 * Note that concurrent frees may occur while we hold the
 		 * list_lock. page->inuse here is the upper limit.
 		 */
-		list_for_each_entry_safe(page, t, &n->partial, lru) {
+		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
 			int free = page->objects - page->inuse;
 
 			/* Do not reread page->inuse */
@@ -3997,10 +3997,10 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 			BUG_ON(free <= 0);
 
 			if (free == page->objects) {
-				list_move(&page->lru, &discard);
+				list_move(&page->slab_list, &discard);
 				n->nr_partial--;
 			} else if (free <= SHRINK_PROMOTE_MAX)
-				list_move(&page->lru, promote + free - 1);
+				list_move(&page->slab_list, promote + free - 1);
 		}
 
 		/*
@@ -4013,7 +4013,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
-		list_for_each_entry_safe(page, t, &discard, lru)
+		list_for_each_entry_safe(page, t, &discard, slab_list)
 			discard_slab(s, page);
 
 		if (slabs_node(s, node))
@@ -4205,11 +4205,11 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 	for_each_kmem_cache_node(s, node, n) {
 		struct page *p;
 
-		list_for_each_entry(p, &n->partial, lru)
+		list_for_each_entry(p, &n->partial, slab_list)
 			p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
-		list_for_each_entry(p, &n->full, lru)
+		list_for_each_entry(p, &n->full, slab_list)
 			p->slab_cache = s;
 #endif
 	}
@@ -4426,7 +4426,7 @@ static int validate_slab_node(struct kmem_cache *s,
 
 	spin_lock_irqsave(&n->list_lock, flags);
 
-	list_for_each_entry(page, &n->partial, lru) {
+	list_for_each_entry(page, &n->partial, slab_list) {
 		validate_slab_slab(s, page, map);
 		count++;
 	}
@@ -4437,7 +4437,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	if (!(s->flags & SLAB_STORE_USER))
 		goto out;
 
-	list_for_each_entry(page, &n->full, lru) {
+	list_for_each_entry(page, &n->full, slab_list) {
 		validate_slab_slab(s, page, map);
 		count++;
 	}
@@ -4633,9 +4633,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru)
+		list_for_each_entry(page, &n->partial, slab_list)
 			process_slab(&t, s, page, alloc, map);
-		list_for_each_entry(page, &n->full, lru)
+		list_for_each_entry(page, &n->full, slab_list)
 			process_slab(&t, s, page, alloc, map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
