@@ -163,35 +163,34 @@ static int start_khugepaged(void)
 }

 static atomic_t huge_zero_refcount;
-static unsigned long huge_zero_pfn __read_mostly;
+static struct page *huge_zero_page __read_mostly;

-static inline bool is_huge_zero_pfn(unsigned long pfn)
+static inline bool is_huge_zero_page(struct page *page)
 {
-        unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
-        return zero_pfn && pfn == zero_pfn;
+        return ACCESS_ONCE(huge_zero_page) == page;
 }

 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-        return is_huge_zero_pfn(pmd_pfn(pmd));
+        return is_huge_zero_page(pmd_page(pmd));
 }

-static unsigned long get_huge_zero_page(void)
+static struct page *get_huge_zero_page(void)
 {
         struct page *zero_page;
 retry:
         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-                return ACCESS_ONCE(huge_zero_pfn);
+                return ACCESS_ONCE(huge_zero_page);

         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                         HPAGE_PMD_ORDER);
         if (!zero_page) {
                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
-                return 0;
+                return NULL;
         }
         count_vm_event(THP_ZERO_PAGE_ALLOC);
         preempt_disable();
-        if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
+        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                 preempt_enable();
                 __free_page(zero_page);
                 goto retry;
@@ -200,7 +199,7 @@ static unsigned long get_huge_zero_page(void)
         /* We take additional reference here. It will be put back by shrinker */
         atomic_set(&huge_zero_refcount, 2);
         preempt_enable();
-        return ACCESS_ONCE(huge_zero_pfn);
+        return ACCESS_ONCE(huge_zero_page);
 }

 static void put_huge_zero_page(void)
@@ -220,9 +219,9 @@ static int shrink_huge_zero_page(struct shrinker *shrink,
                 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;

         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
-                unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
-                BUG_ON(zero_pfn == 0);
-                __free_page(__pfn_to_page(zero_pfn));
+                struct page *zero_page = xchg(&huge_zero_page, NULL);
+                BUG_ON(zero_page == NULL);
+                __free_page(zero_page);
         }

         return 0;
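
The three hunks above carry the whole scheme: the huge zero page is allocated lazily on first use, published with cmpxchg() so racing faulting threads agree on a single page, handed out with its refcount elevated, and freed again from the shrinker once only the cached reference remains. As a rough illustration of that pattern outside the kernel, here is a hypothetical userspace sketch; C11 atomics stand in for the kernel's atomic_t/cmpxchg()/xchg(), a calloc'd buffer stands in for the huge page, none of the names below are kernel API, and the preempt_disable() coordination with the shrinker is not modeled.

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) zero_buf;     /* stands in for huge_zero_page     */
static atomic_int zero_refcount;     /* stands in for huge_zero_refcount */

/* Lazily allocate the shared zero buffer and take a reference on it. */
static void *get_zero_buf(size_t size)
{
        for (;;) {
                /* Fast path: buffer already live, just take a reference
                 * (the moral equivalent of atomic_inc_not_zero()). */
                int ref = atomic_load(&zero_refcount);
                while (ref != 0) {
                        if (atomic_compare_exchange_weak(&zero_refcount,
                                                         &ref, ref + 1))
                                return atomic_load(&zero_buf);
                }

                /* Slow path: allocate and try to publish our buffer. */
                void *buf = calloc(1, size);
                if (!buf)
                        return NULL;

                void *expected = NULL;
                if (!atomic_compare_exchange_strong(&zero_buf, &expected, buf)) {
                        /* Another thread won the race; retry the fast path. */
                        free(buf);
                        continue;
                }
                /* One reference for the caller, one kept back so that the
                 * "shrinker" below is the one that finally frees it. */
                atomic_store(&zero_refcount, 2);
                return buf;
        }
}

/* Drop a caller's reference. */
static void put_zero_buf(void)
{
        atomic_fetch_sub(&zero_refcount, 1);
}

/* Reclaim path: free the buffer only if the cached reference is the last one. */
static void shrink_zero_buf(void)
{
        int expected = 1;
        if (atomic_compare_exchange_strong(&zero_refcount, &expected, 0)) {
                void *buf = atomic_exchange(&zero_buf, NULL);
                free(buf);
        }
}
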
@@ -764,12 +763,12 @@ static inline struct page *alloc_hugepage(int defrag)

 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
-                unsigned long zero_pfn)
+                struct page *zero_page)
 {
         pmd_t entry;
         if (!pmd_none(*pmd))
                 return false;
-        entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
+        entry = mk_pmd(zero_page, vma->vm_page_prot);
         entry = pmd_wrprotect(entry);
         entry = pmd_mkhuge(entry);
         set_pmd_at(mm, haddr, pmd, entry);
@@ -794,20 +793,20 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (!(flags & FAULT_FLAG_WRITE) &&
                         transparent_hugepage_use_zero_page()) {
                 pgtable_t pgtable;
-                unsigned long zero_pfn;
+                struct page *zero_page;
                 bool set;
                 pgtable = pte_alloc_one(mm, haddr);
                 if (unlikely(!pgtable))
                         return VM_FAULT_OOM;
-                zero_pfn = get_huge_zero_page();
-                if (unlikely(!zero_pfn)) {
+                zero_page = get_huge_zero_page();
+                if (unlikely(!zero_page)) {
                         pte_free(mm, pgtable);
                         count_vm_event(THP_FAULT_FALLBACK);
                         goto out;
                 }
                 spin_lock(&mm->page_table_lock);
                 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-                                zero_pfn);
+                                zero_page);
                 spin_unlock(&mm->page_table_lock);
                 if (!set) {
                         pte_free(mm, pgtable);
@@ -886,16 +885,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
          * a page table.
          */
         if (is_huge_zero_pmd(pmd)) {
-                unsigned long zero_pfn;
+                struct page *zero_page;
                 bool set;
                 /*
                  * get_huge_zero_page() will never allocate a new page here,
                  * since we already have a zero page to copy. It just takes a
                  * reference.
                  */
-                zero_pfn = get_huge_zero_page();
+                zero_page = get_huge_zero_page();
                 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
-                                zero_pfn);
+                                zero_page);
                 BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                 ret = 0;
                 goto out_unlock;
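
The comment in copy_huge_pmd() above states the key invariant of the scheme: a second mapping of the zero huge page never allocates, it only takes another reference on the page that is already published. In terms of the hypothetical userspace sketch shown after the shrinker hunk (and assuming its helpers live in the same file), that invariant boils down to something like:

#include <assert.h>

int main(void)
{
        /* First user: allocates the shared buffer and takes a reference. */
        void *parent_view = get_zero_buf(4096);

        /* copy_huge_pmd()-style second user: hits the fast path, allocates
         * nothing, and ends up with the very same buffer. */
        void *child_view = get_zero_buf(4096);
        assert(parent_view == child_view);

        put_zero_buf();      /* second mapping goes away         */
        put_zero_buf();      /* first mapping goes away          */
        shrink_zero_buf();   /* reclaim finally frees the buffer */
        return 0;
}
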
@@ -1812,7 +1811,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         struct anon_vma *anon_vma;
         int ret = 1;

-        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
+        BUG_ON(is_huge_zero_page(page));
         BUG_ON(!PageAnon(page));

         /*