Skip to content

Commit e8c6158

Browse files
kiryl authored and torvalds committed
mm: consolidate all page-flags helpers in <linux/page-flags.h>
Currently we take a naive approach to page flags on compound pages - we set the flag on the page without consideration if the flag makes sense for tail page or for compound page in general. This patchset try to sort this out by defining per-flag policy on what need to be done if page-flag helper operate on compound page. The last patch in the patchset also sanitizes usege of page->mapping for tail pages. We don't define the meaning of page->mapping for tail pages. Currently it's always NULL, which can be inconsistent with head page and potentially lead to problems. For now I caught one case of illegal usage of page flags or ->mapping: sound subsystem allocates pages with __GFP_COMP and maps them with PTEs. It leads to setting dirty bit on tail pages and access to tail_page's ->mapping. I don't see any bad behaviour caused by this, but worth fixing anyway. This patchset makes more sense if you take my THP refcounting into account: we will see more compound pages mapped with PTEs and we need to define behaviour of flags on compound pages to avoid bugs. This patch (of 16): We have page-flags helper function declarations/definitions spread over several header files. Let's consolidate them in <linux/page-flags.h>. Signed-off-by: Kirill A. Shutemov <[email protected]> Cc: Andrea Arcangeli <[email protected]> Acked-by: Hugh Dickins <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Christoph Lameter <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Steve Capper <[email protected]> Cc: "Aneesh Kumar K.V" <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Jerome Marchand <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 64d37a2 commit e8c6158

File tree

4 files changed

+96
-105
lines changed

4 files changed

+96
-105
lines changed

include/linux/hugetlb.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,6 @@ extern int hugetlb_max_hstate __read_mostly;
4141
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
4242
void hugepage_put_subpool(struct hugepage_subpool *spool);
4343

44-
int PageHuge(struct page *page);
45-
4644
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
4745
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
4846
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -109,11 +107,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
109107

110108
#else /* !CONFIG_HUGETLB_PAGE */
111109

112-
static inline int PageHuge(struct page *page)
113-
{
114-
return 0;
115-
}
116-
117110
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
118111
{
119112
}

include/linux/ksm.h

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -35,18 +35,6 @@ static inline void ksm_exit(struct mm_struct *mm)
3535
__ksm_exit(mm);
3636
}
3737

38-
/*
39-
* A KSM page is one of those write-protected "shared pages" or "merged pages"
40-
* which KSM maps into multiple mms, wherever identical anonymous page content
41-
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
42-
* anon_vma, but to that page's node of the stable tree.
43-
*/
44-
static inline int PageKsm(struct page *page)
45-
{
46-
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
47-
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
48-
}
49-
5038
static inline struct stable_node *page_stable_node(struct page *page)
5139
{
5240
return PageKsm(page) ? page_rmapping(page) : NULL;
@@ -87,11 +75,6 @@ static inline void ksm_exit(struct mm_struct *mm)
8775
{
8876
}
8977

90-
static inline int PageKsm(struct page *page)
91-
{
92-
return 0;
93-
}
94-
9578
#ifdef CONFIG_MMU
9679
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
9780
unsigned long end, int advice, unsigned long *vm_flags)

include/linux/mm.h

Lines changed: 0 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -494,15 +494,6 @@ static inline int page_count(struct page *page)
494494
return atomic_read(&compound_head(page)->_count);
495495
}
496496

497-
#ifdef CONFIG_HUGETLB_PAGE
498-
extern int PageHeadHuge(struct page *page_head);
499-
#else /* CONFIG_HUGETLB_PAGE */
500-
static inline int PageHeadHuge(struct page *page_head)
501-
{
502-
return 0;
503-
}
504-
#endif /* CONFIG_HUGETLB_PAGE */
505-
506497
static inline bool __compound_tail_refcounted(struct page *page)
507498
{
508499
return !PageSlab(page) && !PageHeadHuge(page);
@@ -571,53 +562,6 @@ static inline void init_page_count(struct page *page)
571562
atomic_set(&page->_count, 1);
572563
}
573564

574-
/*
575-
* PageBuddy() indicate that the page is free and in the buddy system
576-
* (see mm/page_alloc.c).
577-
*
578-
* PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
579-
* -2 so that an underflow of the page_mapcount() won't be mistaken
580-
* for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
581-
* efficiently by most CPU architectures.
582-
*/
583-
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
584-
585-
static inline int PageBuddy(struct page *page)
586-
{
587-
return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
588-
}
589-
590-
static inline void __SetPageBuddy(struct page *page)
591-
{
592-
VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
593-
atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
594-
}
595-
596-
static inline void __ClearPageBuddy(struct page *page)
597-
{
598-
VM_BUG_ON_PAGE(!PageBuddy(page), page);
599-
atomic_set(&page->_mapcount, -1);
600-
}
601-
602-
#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
603-
604-
static inline int PageBalloon(struct page *page)
605-
{
606-
return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
607-
}
608-
609-
static inline void __SetPageBalloon(struct page *page)
610-
{
611-
VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
612-
atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
613-
}
614-
615-
static inline void __ClearPageBalloon(struct page *page)
616-
{
617-
VM_BUG_ON_PAGE(!PageBalloon(page), page);
618-
atomic_set(&page->_mapcount, -1);
619-
}
620-
621565
void put_page(struct page *page);
622566
void put_pages_list(struct list_head *pages);
623567

@@ -1006,26 +950,6 @@ void page_address_init(void);
1006950
#define page_address_init() do { } while(0)
1007951
#endif
1008952

1009-
/*
1010-
* On an anonymous page mapped into a user virtual memory area,
1011-
* page->mapping points to its anon_vma, not to a struct address_space;
1012-
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
1013-
*
1014-
* On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
1015-
* the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
1016-
* and then page->mapping points, not to an anon_vma, but to a private
1017-
* structure which KSM associates with that merged page. See ksm.h.
1018-
*
1019-
* PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
1020-
*
1021-
* Please note that, confusingly, "page_mapping" refers to the inode
1022-
* address_space which maps the page from disk; whereas "page_mapped"
1023-
* refers to user virtual address space into which the page is mapped.
1024-
*/
1025-
#define PAGE_MAPPING_ANON 1
1026-
#define PAGE_MAPPING_KSM 2
1027-
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
1028-
1029953
extern struct address_space *page_mapping(struct page *page);
1030954

1031955
/* Neutral page->mapping pointer to address_space or anon_vma or other */
@@ -1045,11 +969,6 @@ struct address_space *page_file_mapping(struct page *page)
1045969
return page->mapping;
1046970
}
1047971

1048-
static inline int PageAnon(struct page *page)
1049-
{
1050-
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
1051-
}
1052-
1053972
/*
1054973
* Return the pagecache index of the passed page. Regular pagecache pages
1055974
* use ->index whereas swapcache pages use ->private

include/linux/page-flags.h

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -289,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison)
289289
#define __PG_HWPOISON 0
290290
#endif
291291

292+
/*
293+
* On an anonymous page mapped into a user virtual memory area,
294+
* page->mapping points to its anon_vma, not to a struct address_space;
295+
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
296+
*
297+
* On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
298+
* the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
299+
* and then page->mapping points, not to an anon_vma, but to a private
300+
* structure which KSM associates with that merged page. See ksm.h.
301+
*
302+
* PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
303+
*
304+
* Please note that, confusingly, "page_mapping" refers to the inode
305+
* address_space which maps the page from disk; whereas "page_mapped"
306+
* refers to user virtual address space into which the page is mapped.
307+
*/
308+
#define PAGE_MAPPING_ANON 1
309+
#define PAGE_MAPPING_KSM 2
310+
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
311+
312+
static inline int PageAnon(struct page *page)
313+
{
314+
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
315+
}
316+
317+
#ifdef CONFIG_KSM
318+
/*
319+
* A KSM page is one of those write-protected "shared pages" or "merged pages"
320+
* which KSM maps into multiple mms, wherever identical anonymous page content
321+
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
322+
* anon_vma, but to that page's node of the stable tree.
323+
*/
324+
static inline int PageKsm(struct page *page)
325+
{
326+
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
327+
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
328+
}
329+
#else
330+
TESTPAGEFLAG_FALSE(Ksm)
331+
#endif
332+
292333
u64 stable_page_flags(struct page *page);
293334

294335
static inline int PageUptodate(struct page *page)
@@ -426,6 +467,14 @@ static inline void ClearPageCompound(struct page *page)
426467

427468
#endif /* !PAGEFLAGS_EXTENDED */
428469

470+
#ifdef CONFIG_HUGETLB_PAGE
471+
int PageHuge(struct page *page);
472+
int PageHeadHuge(struct page *page);
473+
#else
474+
TESTPAGEFLAG_FALSE(Huge)
475+
TESTPAGEFLAG_FALSE(HeadHuge)
476+
#endif
477+
429478
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
430479
/*
431480
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -479,6 +528,53 @@ static inline int PageTransTail(struct page *page)
479528
}
480529
#endif
481530

531+
/*
532+
* PageBuddy() indicate that the page is free and in the buddy system
533+
* (see mm/page_alloc.c).
534+
*
535+
* PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
536+
* -2 so that an underflow of the page_mapcount() won't be mistaken
537+
* for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
538+
* efficiently by most CPU architectures.
539+
*/
540+
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
541+
542+
static inline int PageBuddy(struct page *page)
543+
{
544+
return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
545+
}
546+
547+
static inline void __SetPageBuddy(struct page *page)
548+
{
549+
VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
550+
atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
551+
}
552+
553+
static inline void __ClearPageBuddy(struct page *page)
554+
{
555+
VM_BUG_ON_PAGE(!PageBuddy(page), page);
556+
atomic_set(&page->_mapcount, -1);
557+
}
558+
559+
#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
560+
561+
static inline int PageBalloon(struct page *page)
562+
{
563+
return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
564+
}
565+
566+
static inline void __SetPageBalloon(struct page *page)
567+
{
568+
VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
569+
atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
570+
}
571+
572+
static inline void __ClearPageBalloon(struct page *page)
573+
{
574+
VM_BUG_ON_PAGE(!PageBalloon(page), page);
575+
atomic_set(&page->_mapcount, -1);
576+
}
577+
482578
/*
483579
* If network-based swap is enabled, sl*b must keep track of whether pages
484580
* were allocated from pfmemalloc reserves.

0 commit comments

Comments (0)