@@ -1014,7 +1014,7 @@ static void add_full(struct kmem_cache *s,
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_add(&page->lru, &n->full);
+	list_add(&page->slab_list, &n->full);
 }
 
 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
@@ -1023,7 +1023,7 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1764,9 +1764,9 @@ __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
-		list_add_tail(&page->lru, &n->partial);
+		list_add_tail(&page->slab_list, &n->partial);
 	else
-		list_add(&page->lru, &n->partial);
+		list_add(&page->slab_list, &n->partial);
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
@@ -1780,7 +1780,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	n->nr_partial--;
 }
 
@@ -1854,7 +1854,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	list_for_each_entry_safe(page, page2, &n->partial, lru) {
+	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
 		if (!pfmemalloc_match(page, flags))
@@ -2398,7 +2398,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	struct page *page;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
+	list_for_each_entry(page, &n->partial, slab_list)
 		x += get_count(page);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
@@ -3696,18 +3696,18 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	BUG_ON(irqs_disabled());
 	spin_lock_irq(&n->list_lock);
-	list_for_each_entry_safe(page, h, &n->partial, lru) {
+	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
 		if (!page->inuse) {
 			remove_partial(n, page);
-			list_add(&page->lru, &discard);
+			list_add(&page->slab_list, &discard);
 		} else {
 			list_slab_objects(s, page,
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
 
-	list_for_each_entry_safe(page, h, &discard, lru)
+	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
 }
 
@@ -3987,7 +3987,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		 * Note that concurrent frees may occur while we hold the
 		 * list_lock. page->inuse here is the upper limit.
 		 */
-		list_for_each_entry_safe(page, t, &n->partial, lru) {
+		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
 			int free = page->objects - page->inuse;
 
 			/* Do not reread page->inuse */
@@ -3997,10 +3997,10 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 			BUG_ON(free <= 0);
 
 			if (free == page->objects) {
-				list_move(&page->lru, &discard);
+				list_move(&page->slab_list, &discard);
 				n->nr_partial--;
 			} else if (free <= SHRINK_PROMOTE_MAX)
-				list_move(&page->lru, promote + free - 1);
+				list_move(&page->slab_list, promote + free - 1);
 		}
 
 		/*
@@ -4013,7 +4013,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
-		list_for_each_entry_safe(page, t, &discard, lru)
+		list_for_each_entry_safe(page, t, &discard, slab_list)
 			discard_slab(s, page);
 
 		if (slabs_node(s, node))
@@ -4205,11 +4205,11 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 	for_each_kmem_cache_node(s, node, n) {
 		struct page *p;
 
-		list_for_each_entry(p, &n->partial, lru)
+		list_for_each_entry(p, &n->partial, slab_list)
 			p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
-		list_for_each_entry(p, &n->full, lru)
+		list_for_each_entry(p, &n->full, slab_list)
 			p->slab_cache = s;
 #endif
 	}
@@ -4426,7 +4426,7 @@ static int validate_slab_node(struct kmem_cache *s,
 
 	spin_lock_irqsave(&n->list_lock, flags);
 
-	list_for_each_entry(page, &n->partial, lru) {
+	list_for_each_entry(page, &n->partial, slab_list) {
 		validate_slab_slab(s, page, map);
 		count++;
 	}
@@ -4437,7 +4437,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	if (!(s->flags & SLAB_STORE_USER))
 		goto out;
 
-	list_for_each_entry(page, &n->full, lru) {
+	list_for_each_entry(page, &n->full, slab_list) {
 		validate_slab_slab(s, page, map);
 		count++;
 	}
@@ -4633,9 +4633,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru)
+		list_for_each_entry(page, &n->partial, slab_list)
			process_slab(&t, s, page, alloc, map);
-		list_for_each_entry(page, &n->full, lru)
+		list_for_each_entry(page, &n->full, slab_list)
 			process_slab(&t, s, page, alloc, map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
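
For reference, the rename is purely cosmetic at the memory-layout level: slab_list lives in a union that overlays lru inside struct page, so both names address the same two list pointers. Below is a simplified, heavily abbreviated sketch of that union (field names follow include/linux/mm_types.h from the kernel series this diff belongs to; most members and the other union arms are omitted, so treat it as an approximation rather than the real definition):

/*
 * Abbreviated sketch of the relevant part of struct page.
 * slab_list and lru occupy the same storage, so
 * list_add(&page->slab_list, ...) touches exactly the same words
 * as list_add(&page->lru, ...); the new name only documents that
 * the page is being used as a slab.
 */
struct page_sketch {			/* hypothetical name for illustration */
	unsigned long flags;
	union {
		struct {		/* page cache and anonymous pages */
			struct list_head lru;
			/* ... */
		};
		struct {		/* slab, slob and slub */
			union {
				struct list_head slab_list;
				/* ... fields used for percpu partial pages ... */
			};
			struct kmem_cache *slab_cache;
			/* ... */
		};
		/* ... other union arms ... */
	};
	/* ... */
};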