@@ -21,15 +21,16 @@ struct hugetlb_cgroup;
 struct resv_map;
 struct file_region;
 
+#ifdef CONFIG_CGROUP_HUGETLB
 /*
  * Minimum page order trackable by hugetlb cgroup.
  * At least 4 pages are necessary for all the tracking information.
- * The second tail page (hpage[2]) is the fault usage cgroup.
- * The third tail page (hpage[3]) is the reservation usage cgroup.
+ * The second tail page (hpage[SUBPAGE_INDEX_CGROUP]) is the fault
+ * usage cgroup. The third tail page (hpage[SUBPAGE_INDEX_CGROUP_RSVD])
+ * is the reservation usage cgroup.
  */
-#define HUGETLB_CGROUP_MIN_ORDER	2
+#define HUGETLB_CGROUP_MIN_ORDER	order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1)
 
-#ifdef CONFIG_CGROUP_HUGETLB
 
 enum hugetlb_memory_event {
 	HUGETLB_MAX,
 	HUGETLB_NR_MEMORY_EVENTS,
@@ -66,9 +67,9 @@ __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return NULL;
 	if (rsvd)
-		return (struct hugetlb_cgroup *)page[3].private;
+		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
 	else
-		return (struct hugetlb_cgroup *)page[2].private;
+		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
 }
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
@@ -90,9 +91,11 @@ static inline int __set_hugetlb_cgroup(struct page *page,
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return -1;
 	if (rsvd)
-		page[3].private = (unsigned long)h_cg;
+		set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
+				 (unsigned long)h_cg);
 	else
-		page[2].private = (unsigned long)h_cg;
+		set_page_private(page + SUBPAGE_INDEX_CGROUP,
+				 (unsigned long)h_cg);
 	return 0;
 }
 
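
Note (added for context): the SUBPAGE_INDEX_* constants and __MAX_CGROUP_SUBPAGE_INDEX used above are introduced elsewhere in this series (in include/linux/hugetlb.h), not in this hunk. Below is a minimal sketch of an enum consistent with the "second/third tail page" comment in the first hunk; the exact upstream definition may differ, so treat the names and values as illustrative.

/*
 * Hypothetical sketch: named slots for the tail pages whose
 * page->private field hugetlb reuses. Values are chosen to match the
 * comment above: the second tail page (hpage[2]) holds the fault usage
 * cgroup, the third tail page (hpage[3]) the reservation usage cgroup.
 */
enum {
	SUBPAGE_INDEX_CGROUP = 2,	/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* = 3, reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
};

With these values, order_base_2(__MAX_CGROUP_SUBPAGE_INDEX + 1) = order_base_2(4) = 2, so HUGETLB_CGROUP_MIN_ORDER still evaluates to the old hard-coded 2 (a 4-page compound page), but it will now grow automatically if a later patch claims more tail-page slots. The page_private()/set_page_private() helpers are the kernel's standard accessors for page->private, so the conversion is mechanical: the stored value is unchanged, only the index gains a name.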