@@ -941,6 +941,103 @@ static inline int free_pages_check(struct page *page)
 	return 1;
 }
 
+static int free_tail_pages_check(struct page *head_page, struct page *page)
+{
+	int ret = 1;
+
+	/*
+	 * We rely page->lru.next never has bit 0 set, unless the page
+	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
+	 */
+	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
+
+	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
+		ret = 0;
+		goto out;
+	}
+	switch (page - head_page) {
+	case 1:
+		/* the first tail page: ->mapping is compound_mapcount() */
+		if (unlikely(compound_mapcount(page))) {
+			bad_page(page, "nonzero compound_mapcount", 0);
+			goto out;
+		}
+		break;
+	case 2:
+		/*
+		 * the second tail page: ->mapping is
+		 * page_deferred_list().next -- ignore value.
+		 */
+		break;
+	default:
+		if (page->mapping != TAIL_MAPPING) {
+			bad_page(page, "corrupted mapping in tail page", 0);
+			goto out;
+		}
+		break;
+	}
+	if (unlikely(!PageTail(page))) {
+		bad_page(page, "PageTail not set", 0);
+		goto out;
+	}
+	if (unlikely(compound_head(page) != head_page)) {
+		bad_page(page, "compound_head not consistent", 0);
+		goto out;
+	}
+	ret = 0;
+out:
+	page->mapping = NULL;
+	clear_compound_head(page);
+	return ret;
+}
+
+static bool free_pages_prepare(struct page *page, unsigned int order);
+
+#ifdef CONFIG_DEBUG_VM
+static inline bool free_pcp_prepare(struct page *page)
+{
+	return free_pages_prepare(page, 0);
+}
+
+static inline bool bulkfree_pcp_prepare(struct page *page)
+{
+	return false;
+}
+#else
+static bool free_pcp_prepare(struct page *page)
+{
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
+	trace_mm_page_free(page, 0);
+	kmemcheck_free_shadow(page, 0);
+	kasan_free_pages(page, 0);
+
+	if (PageAnonHead(page))
+		page->mapping = NULL;
+
+	reset_page_owner(page, 0);
+
+	if (!PageHighMem(page)) {
+		debug_check_no_locks_freed(page_address(page),
+					   PAGE_SIZE);
+		debug_check_no_obj_freed(page_address(page),
+					 PAGE_SIZE);
+	}
+	arch_free_page(page, 0);
+	kernel_poison_pages(page, 0, 0);
+	kernel_map_pages(page, 0, 0);
+
+	page_cpupid_reset_last(page);
+	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+	return true;
+}
+
+static bool bulkfree_pcp_prepare(struct page *page)
+{
+	return free_pages_check(page);
+}
+#endif /* CONFIG_DEBUG_VM */
+
 /*
  * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
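The new free_pcp_prepare()/bulkfree_pcp_prepare() pair above decides where the expensive page validation runs: on the per-page free path when CONFIG_DEBUG_VM is set, or deferred to the batched PCP drain when it is not. A minimal standalone sketch of that placement trade-off, using illustrative stand-in names rather than the kernel API:

/*
 * Standalone sketch (not kernel code): DEBUG_VM stands in for
 * CONFIG_DEBUG_VM, full_check() for free_pages_check(). With DEBUG_VM
 * the check runs on the hot free path; without it, at the batched drain.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEBUG_VM 0	/* flip to 1 to mimic CONFIG_DEBUG_VM */

struct fake_page {
	unsigned long flags;	/* nonzero means "bad page" in this sketch */
};

static bool full_check(struct fake_page *page)
{
	return page->flags != 0;
}

/* called on every free: returns false if the page must not be freed */
static bool pcp_prepare(struct fake_page *page)
{
#if DEBUG_VM
	return !full_check(page);	/* validate immediately, hot path */
#else
	return true;			/* defer validation to the drain */
#endif
}

/* called once per page at drain time: returns true if the page is bad */
static bool bulk_prepare(struct fake_page *page)
{
#if DEBUG_VM
	return false;			/* already validated at free time */
#else
	return full_check(page);	/* deferred validation, cold path */
#endif
}

int main(void)
{
	struct fake_page bad = { .flags = 1 };

	/* free path, then drain path */
	if (pcp_prepare(&bad) && bulk_prepare(&bad))
		printf("bad page caught at drain, skipped\n");
	return 0;
}

Exactly one of the two hooks performs the check in either configuration, which is the invariant the call sites changed in the hunks below depend on.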
@@ -1002,6 +1099,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			if (unlikely(isolated_pageblocks))
 				mt = get_pageblock_migratetype(page);
 
+			if (bulkfree_pcp_prepare(page))
+				continue;
+
 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
@@ -1028,56 +1128,6 @@ static void free_one_page(struct zone *zone,
 	spin_unlock(&zone->lock);
 }
 
-static int free_tail_pages_check(struct page *head_page, struct page *page)
-{
-	int ret = 1;
-
-	/*
-	 * We rely page->lru.next never has bit 0 set, unless the page
-	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
-	 */
-	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
-
-	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
-		ret = 0;
-		goto out;
-	}
-	switch (page - head_page) {
-	case 1:
-		/* the first tail page: ->mapping is compound_mapcount() */
-		if (unlikely(compound_mapcount(page))) {
-			bad_page(page, "nonzero compound_mapcount", 0);
-			goto out;
-		}
-		break;
-	case 2:
-		/*
-		 * the second tail page: ->mapping is
-		 * page_deferred_list().next -- ignore value.
-		 */
-		break;
-	default:
-		if (page->mapping != TAIL_MAPPING) {
-			bad_page(page, "corrupted mapping in tail page", 0);
-			goto out;
-		}
-		break;
-	}
-	if (unlikely(!PageTail(page))) {
-		bad_page(page, "PageTail not set", 0);
-		goto out;
-	}
-	if (unlikely(compound_head(page) != head_page)) {
-		bad_page(page, "compound_head not consistent", 0);
-		goto out;
-	}
-	ret = 0;
-out:
-	page->mapping = NULL;
-	clear_compound_head(page);
-	return ret;
-}
-
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid)
 {
@@ -2339,7 +2389,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (!free_pages_prepare(page, 0))
+	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
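For reference, free_tail_pages_check() (moved earlier in the file by this patch) validates each tail page against the compound-page layout: the first tail reuses ->mapping for compound_mapcount(), the second for the deferred-split list, and every later tail must carry TAIL_MAPPING. A standalone sketch of that offset-based classification, with stand-in names rather than the kernel types:

/*
 * Standalone sketch (not kernel code): FAKE_TAIL_MAPPING stands in for
 * TAIL_MAPPING, and the pointer difference page - head mirrors the
 * switch (page - head_page) in free_tail_pages_check() above.
 */
#include <stdio.h>

#define FAKE_TAIL_MAPPING ((void *)0x400)

struct fake_page {
	void *mapping;
};

/* classify a tail page by its offset from the compound head */
static const char *tail_role(struct fake_page *head, struct fake_page *page)
{
	switch (page - head) {
	case 1:
		return "first tail: ->mapping holds compound_mapcount";
	case 2:
		return "second tail: ->mapping holds deferred-list link";
	default:
		return page->mapping == FAKE_TAIL_MAPPING ?
			"later tail: ->mapping is TAIL_MAPPING" :
			"later tail: corrupted ->mapping";
	}
}

int main(void)
{
	/* a fake order-2 compound page: one head plus three tails */
	struct fake_page compound[4] = {
		{ 0 }, { 0 }, { 0 }, { FAKE_TAIL_MAPPING },
	};

	for (int i = 1; i < 4; i++)
		printf("tail %d: %s\n", i, tail_role(compound, &compound[i]));
	return 0;
}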