@@ -756,12 +756,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
 	__SetPageBuddy(page);
 }
 
-static inline void rmv_page_order(struct page *page)
-{
-	__ClearPageBuddy(page);
-	set_page_private(page, 0);
-}
-
 /*
  * This function checks whether a page is free && is the buddy
  * we can coalesce a page and its buddy if
@@ -919,13 +913,10 @@ static inline void __free_one_page(struct page *page,
 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 		 * merge with it and move up one order.
 		 */
-		if (page_is_guard(buddy)) {
+		if (page_is_guard(buddy))
 			clear_page_guard(zone, buddy, order, migratetype);
-		} else {
-			list_del(&buddy->lru);
-			zone->free_area[order].nr_free--;
-			rmv_page_order(buddy);
-		}
+		else
+			del_page_from_free_area(buddy, &zone->free_area[order]);
 		combined_pfn = buddy_pfn & pfn;
 		page = page + (combined_pfn - pfn);
 		pfn = combined_pfn;
@@ -975,15 +966,13 @@ static inline void __free_one_page(struct page *page,
 		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 		if (pfn_valid_within(buddy_pfn) &&
 		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
-			list_add_tail(&page->lru,
-				&zone->free_area[order].free_list[migratetype]);
-			goto out;
+			add_to_free_area_tail(page, &zone->free_area[order],
+					      migratetype);
+			return;
 		}
 	}
 
-	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
-out:
-	zone->free_area[order].nr_free++;
+	add_to_free_area(page, &zone->free_area[order], migratetype);
 }
 
 /*
@@ -1974,8 +1963,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		if (set_page_guard(zone, &page[size], high, migratetype))
 			continue;
 
-		list_add(&page[size].lru, &area->free_list[migratetype]);
-		area->nr_free++;
+		add_to_free_area(&page[size], area, migratetype);
 		set_page_order(&page[size], high);
 	}
 }
@@ -2117,13 +2105,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = &(zone->free_area[current_order]);
-		page = list_first_entry_or_null(&area->free_list[migratetype],
-						struct page, lru);
+		page = get_page_from_free_area(area, migratetype);
 		if (!page)
 			continue;
-		list_del(&page->lru);
-		rmv_page_order(page);
-		area->nr_free--;
+		del_page_from_free_area(page, area);
 		expand(zone, page, order, current_order, area, migratetype);
 		set_pcppage_migratetype(page, migratetype);
 		return page;
@@ -2209,8 +2194,7 @@ static int move_freepages(struct zone *zone,
 		}
 
 		order = page_order(page);
-		list_move(&page->lru,
-			  &zone->free_area[order].free_list[migratetype]);
+		move_to_free_area(page, &zone->free_area[order], migratetype);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
@@ -2398,7 +2382,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 
 single_page:
 	area = &zone->free_area[current_order];
-	list_move(&page->lru, &area->free_list[start_type]);
+	move_to_free_area(page, area, start_type);
 }
 
 /*
@@ -2422,7 +2406,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (fallback_mt == MIGRATE_TYPES)
 			break;
 
-		if (list_empty(&area->free_list[fallback_mt]))
+		if (free_area_empty(area, fallback_mt))
 			continue;
 
 		if (can_steal_fallback(order, migratetype))
@@ -2509,9 +2493,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
-			page = list_first_entry_or_null(
-					&area->free_list[MIGRATE_HIGHATOMIC],
-					struct page, lru);
+			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
 			if (!page)
 				continue;
 
@@ -2634,8 +2616,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 	VM_BUG_ON(current_order == MAX_ORDER);
 
 do_steal:
-	page = list_first_entry(&area->free_list[fallback_mt],
-						struct page, lru);
+	page = get_page_from_free_area(area, fallback_mt);
 
 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
								can_steal);
@@ -3072,6 +3053,7 @@ EXPORT_SYMBOL_GPL(split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
+	struct free_area *area = &page_zone(page)->free_area[order];
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
@@ -3096,9 +3078,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	}
 
 	/* Remove page from free list */
-	list_del(&page->lru);
-	zone->free_area[order].nr_free--;
-	rmv_page_order(page);
+
+	del_page_from_free_area(page, area);
 
 	/*
 	 * Set the pageblock if the isolated page is at least half of a
@@ -3395,13 +3376,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
-			if (!list_empty(&area->free_list[mt]))
+			if (!free_area_empty(area, mt))
 				return true;
 		}
 
 #ifdef CONFIG_CMA
 		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		    !free_area_empty(area, MIGRATE_CMA)) {
 			return true;
 		}
 #endif
@@ -5328,7 +5309,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 
 			types[order] = 0;
 			for (type = 0; type < MIGRATE_TYPES; type++) {
-				if (!list_empty(&area->free_list[type]))
+				if (!free_area_empty(area, type))
 					types[order] |= 1 << type;
 			}
 		}
@@ -8501,9 +8482,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		pr_info("remove from free list %lx %d %lx\n",
 			pfn, 1 << order, end_pfn);
 #endif
-		list_del(&page->lru);
-		rmv_page_order(page);
-		zone->free_area[order].nr_free--;
+		del_page_from_free_area(page, &zone->free_area[order]);
 		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page + i));
 		pfn += (1 << order);
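
For reference: the helpers this diff switches to (add_to_free_area(), add_to_free_area_tail(), move_to_free_area(), get_page_from_free_area(), del_page_from_free_area(), free_area_empty()) are not defined in this file; the commit introduces them elsewhere, presumably in include/linux/mmzone.h. A minimal sketch of what they must look like, reconstructed from the open-coded sequences removed above — the exact signatures in the tree may differ:

/* Sketch only: reconstructed from the open-coded sequences this diff removes. */
static inline void add_to_free_area(struct page *page, struct free_area *area,
				    int migratetype)
{
	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

static inline void add_to_free_area_tail(struct page *page,
					 struct free_area *area,
					 int migratetype)
{
	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

static inline void move_to_free_area(struct page *page, struct free_area *area,
				     int migratetype)
{
	/* list_move() only re-links the page; nr_free is unchanged, matching
	 * the bare list_move() calls this replaces. */
	list_move(&page->lru, &area->free_list[migratetype]);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline void del_page_from_free_area(struct page *page,
					   struct free_area *area)
{
	list_del(&page->lru);
	/* Folds in the deleted rmv_page_order(): clear PageBuddy and the
	 * order stashed in page->private. */
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	area->nr_free--;
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

Centralizing the list manipulation, the PageBuddy/order bookkeeping, and the nr_free accounting in one place keeps those pieces of state from drifting apart across the many call sites touched above.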