
Commit 91030ca

Hugh Dickins authored and Ingo Molnar committed
x86: unsigned long pte_pfn
pte_pfn() has always been of type unsigned long, even on 32-bit PAE; but in the current tip/next/mm tree it works out to be unsigned long long on 64-bit, which gives an irritating warning if you try to printk a pfn with the usual %lx.

Now use the same pte_pfn() function, moved from pgtable-3level.h to pgtable.h, for all models: as suggested by Jeremy Fitzhardinge. And pte_page() can well move along with it (remaining a macro to avoid dependence on mm_types.h).

Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Jeremy Fitzhardinge <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
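The format warning the message describes is easy to reproduce outside the kernel. A minimal userspace sketch (plain C built with -Wformat, not kernel code; the variable names are purely illustrative):

#include <stdio.h>

int main(void)
{
	/* With pte_pfn() returning unsigned long, %lx is the matching format. */
	unsigned long pfn = 0x12345UL;
	printf("pfn = %lx\n", pfn);		/* no warning */

	/*
	 * If the pfn were unsigned long long (the 64-bit pte_val() case the
	 * commit fixes), %lx would trigger a -Wformat warning; %llx is needed.
	 */
	unsigned long long wide_pfn = 0x12345ULL;
	printf("pfn = %llx\n", wide_pfn);	/* %lx here would warn */

	return 0;
}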
1 parent adee14b

4 files changed: 7 additions, 11 deletions

include/asm-x86/pgtable-2level.h (0 additions, 2 deletions)

@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_none(x)	(!(x).pte_low)
-#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
 
 /*
  * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
include/asm-x86/pgtable-3level.h (0 additions, 7 deletions)

@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x)	pfn_to_page(pte_pfn(x))
-
 static inline int pte_none(pte_t pte)
 {
 	return !pte.pte_low && !pte.pte_high;
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
include/asm-x86/pgtable.h (7 additions, 0 deletions)

@@ -186,6 +186,13 @@ static inline int pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
+
 static inline int pmd_large(pmd_t pte)
 {
 	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
include/asm-x86/pgtable_64.h (0 additions, 2 deletions)

@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
 #define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 #define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */
-#define pte_page(x)	pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x)	((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 /*
  * Macro to mark a page protection value as "uncacheable".