Skip to content

Commit ab5e653

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm/swap: convert release_pages to use a folio internally
This function was already calling compound_head(), but now it can cache the result of calling compound_head() and avoid calling it again. Saves 299 bytes of text by avoiding various calls to compound_page() and avoiding checks of PageTail.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 2397f78 commit ab5e653

File tree

1 file changed

+16
-18
lines changed

1 file changed

+16
-18
lines changed

mm/swap.c

Lines changed: 16 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -941,8 +941,7 @@ void release_pages(struct page **pages, int nr)
941941
unsigned int lock_batch;
942942

943943
for (i = 0; i < nr; i++) {
944-
struct page *page = pages[i];
945-
struct folio *folio = page_folio(page);
944+
struct folio *folio = page_folio(pages[i]);
946945

947946
/*
948947
* Make sure the IRQ-safe lock-holding time does not get
@@ -954,44 +953,43 @@ void release_pages(struct page **pages, int nr)
954953
lruvec = NULL;
955954
}
956955

957-
page = &folio->page;
958-
if (is_huge_zero_page(page))
956+
if (is_huge_zero_page(&folio->page))
959957
continue;
960958

961-
if (is_zone_device_page(page)) {
959+
if (folio_is_zone_device(folio)) {
962960
if (lruvec) {
963961
unlock_page_lruvec_irqrestore(lruvec, flags);
964962
lruvec = NULL;
965963
}
966-
if (put_devmap_managed_page(page))
964+
if (put_devmap_managed_page(&folio->page))
967965
continue;
968-
if (put_page_testzero(page))
969-
free_zone_device_page(page);
966+
if (folio_put_testzero(folio))
967+
free_zone_device_page(&folio->page);
970968
continue;
971969
}
972970

973-
if (!put_page_testzero(page))
971+
if (!folio_put_testzero(folio))
974972
continue;
975973

976-
if (PageCompound(page)) {
974+
if (folio_test_large(folio)) {
977975
if (lruvec) {
978976
unlock_page_lruvec_irqrestore(lruvec, flags);
979977
lruvec = NULL;
980978
}
981-
__put_compound_page(page);
979+
__put_compound_page(&folio->page);
982980
continue;
983981
}
984982

985-
if (PageLRU(page)) {
983+
if (folio_test_lru(folio)) {
986984
struct lruvec *prev_lruvec = lruvec;
987985

988986
lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
989987
&flags);
990988
if (prev_lruvec != lruvec)
991989
lock_batch = 0;
992990

993-
del_page_from_lru_list(page, lruvec);
994-
__clear_page_lru_flags(page);
991+
lruvec_del_folio(lruvec, folio);
992+
__folio_clear_lru_flags(folio);
995993
}
996994

997995
/*
@@ -1000,13 +998,13 @@ void release_pages(struct page **pages, int nr)
1000998
* found set here. This does not indicate a problem, unless
1001999
* "unevictable_pgs_cleared" appears worryingly large.
10021000
*/
1003-
if (unlikely(PageMlocked(page))) {
1004-
__ClearPageMlocked(page);
1005-
dec_zone_page_state(page, NR_MLOCK);
1001+
if (unlikely(folio_test_mlocked(folio))) {
1002+
__folio_clear_mlocked(folio);
1003+
zone_stat_sub_folio(folio, NR_MLOCK);
10061004
count_vm_event(UNEVICTABLE_PGCLEARED);
10071005
}
10081006

1009-
list_add(&page->lru, &pages_to_free);
1007+
list_add(&folio->lru, &pages_to_free);
10101008
}
10111009
if (lruvec)
10121010
unlock_page_lruvec_irqrestore(lruvec, flags);

0 commit comments

Comments (0)