@@ -1674,8 +1674,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
 
-	list_for_each_entry_safe(page, n, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, n, list, slab_list) {
+		list_del(&page->slab_list);
 		slab_destroy(cachep, page);
 	}
 }
@@ -2231,8 +2231,8 @@ static int drain_freelist(struct kmem_cache *cache,
 		goto out;
 	}
 
-	page = list_entry(p, struct page, lru);
-	list_del(&page->lru);
+	page = list_entry(p, struct page, slab_list);
+	list_del(&page->slab_list);
 	n->free_slabs--;
 	n->total_slabs--;
 	/*
@@ -2691,13 +2691,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	if (!page)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
+	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
-		list_add_tail(&page->lru, &(n->slabs_free));
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
 		fixup_slab_list(cachep, n, page, &list);
@@ -2806,9 +2806,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 				void **list)
 {
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (page->active == cachep->num) {
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
@@ -2822,7 +2822,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 			page->freelist = NULL;
 		}
 	} else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2845,20 +2845,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (!page->active) {
-		list_add_tail(&page->lru, &n->slabs_free);
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->lru, &n->slabs_partial);
+		list_add_tail(&page->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, lru) {
+	list_for_each_entry(page, &n->slabs_partial, slab_list) {
 		if (!PageSlabPfmemalloc(page))
 			return page;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, lru) {
+	list_for_each_entry(page, &n->slabs_free, slab_list) {
 		if (!PageSlabPfmemalloc(page)) {
 			n->free_slabs--;
 			return page;
@@ -2873,11 +2873,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 	struct page *page;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+					slab_list);
 	if (!page) {
 		n->free_touched = 1;
 		page = list_first_entry_or_null(&n->slabs_free, struct page,
-						lru);
+						slab_list);
 		if (page)
 			n->free_slabs--;
 	}
@@ -3378,29 +3379,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		objp = objpp[i];
 
 		page = virt_to_head_page(objp);
-		list_del(&page->lru);
+		list_del(&page->slab_list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
-			list_add(&page->lru, &n->slabs_free);
+			list_add(&page->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, lru);
-		list_move(&page->lru, list);
+		page = list_last_entry(&n->slabs_free, struct page, slab_list);
+		list_move(&page->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3438,7 +3439,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 		int i = 0;
 		struct page *page;
 
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, slab_list) {
 			BUG_ON(page->active);
 
 			i++;
@@ -4302,9 +4303,9 @@ static int leaks_show(struct seq_file *m, void *p)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, slab_list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, slab_list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}
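
Every hunk above makes the same mechanical change: the SLAB code stops threading its per-node slab lists through page->lru and passes the page->slab_list member to list_del()/list_add*/list_for_each_entry*() instead, while the list logic itself is unchanged. To illustrate why these macros take a member name at all, below is a minimal standalone sketch of an intrusive list, assuming cut-down userspace re-implementations of container_of() and the list helpers; this is not the kernel's <linux/list.h>, and the fake_slab type is a hypothetical stand-in for struct page.

/*
 * Standalone sketch of an intrusive list with a named link member.
 * Compile with gcc/clang (uses the typeof extension, as the kernel does).
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Recover the containing structure from the embedded list_head. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Iterate, safe against removal of the current entry. */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

/* Hypothetical stand-in for struct page: the link is an embedded member. */
struct fake_slab {
	int id;
	struct list_head slab_list;	/* analogous to page->slab_list */
};

int main(void)
{
	struct list_head slabs_free = LIST_HEAD_INIT(slabs_free);
	struct fake_slab *slab, *n;
	int i;

	for (i = 0; i < 3; i++) {
		slab = malloc(sizeof(*slab));
		slab->id = i;
		list_add_tail(&slab->slab_list, &slabs_free);
	}

	/* Mirrors the slabs_destroy() loop in the first hunk above. */
	list_for_each_entry_safe(slab, n, &slabs_free, slab_list) {
		list_del(&slab->slab_list);
		printf("destroying slab %d\n", slab->id);
		free(slab);
	}
	return 0;
}

Because the linkage is embedded in the object itself, unlinking with list_del() needs no allocation or search, and the iteration macros only need the list head plus the name of the embedded member, which is exactly the name this patch changes.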