
Commit ac29c64

kvaneesh authored and mpe committed
powerpc/mm: Replace _PAGE_USER with _PAGE_PRIVILEGED

_PAGE_PRIVILEGED means the page can be accessed only by the kernel. This is
done to keep the pte bits similar to the PowerISA 3.0 Radix PTE format. User
pages are now marked by clearing the _PAGE_PRIVILEGED bit.

Previously we allowed the kernel to have a privileged page in the lower
address range (USER_REGION). With this patch such access is denied. We also
prevent kernel access to a non-privileged page in the higher address range
(i.e., REGION_ID != 0). Neither access scenario should ever happen.

Cc: Arnd Bergmann <[email protected]>
Cc: Jeremy Kerr <[email protected]>
Cc: Frederic Barrat <[email protected]>
Acked-by: Ian Munsie <[email protected]>
Signed-off-by: Aneesh Kumar K.V <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
1 parent e7bfc46 commit ac29c64
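
In effect the patch inverts the polarity of a single PTE bit: the bit position
that used to say "userspace may access this page" now says "only the kernel may
access this page". A minimal standalone sketch of that inversion (illustrative
only, not kernel code; the helper names are hypothetical, the 0x00008 value
comes from the hash.h hunk below):

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_USER       0x00008UL /* old meaning: user access allowed */
#define _PAGE_PRIVILEGED 0x00008UL /* new meaning: kernel access only  */

/* Old scheme: a PTE was a user PTE when _PAGE_USER was set. */
static bool old_pte_user(unsigned long ptev)
{
    return ptev & _PAGE_USER;
}

/* New scheme: a PTE is a user PTE unless _PAGE_PRIVILEGED is set. */
static bool new_pte_user(unsigned long ptev)
{
    return !(ptev & _PAGE_PRIVILEGED);
}

int main(void)
{
    unsigned long kernel_pte = _PAGE_PRIVILEGED; /* bit set   */
    unsigned long user_pte   = 0;                /* bit clear */

    /* The same bit pattern flips meaning across the two schemes. */
    printf("bit set:   old user=%d, new user=%d\n",
           old_pte_user(kernel_pte), new_pte_user(kernel_pte)); /* 1, 0 */
    printf("bit clear: old user=%d, new user=%d\n",
           old_pte_user(user_pte), new_pte_user(user_pte));     /* 0, 1 */
    return 0;
}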

File tree

12 files changed (+80, -37 lines)

arch/powerpc/include/asm/book3s/64/hash.h
Lines changed: 17 additions & 17 deletions

@@ -20,7 +20,7 @@
 #define _PAGE_READ          0x00004 /* read access allowed */
 #define _PAGE_RW            (_PAGE_READ | _PAGE_WRITE)
 #define _PAGE_RWX           (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
-#define _PAGE_USER          0x00008 /* page may be accessed by userspace */
+#define _PAGE_PRIVILEGED    0x00008 /* kernel access only */
 #define _PAGE_GUARDED       0x00010 /* G: guarded (side-effect) page */
 /* M (memory coherence) is always set in the HPTE, so we don't need it here */
 #define _PAGE_COHERENT      0x0
@@ -114,10 +114,13 @@
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 #endif /* CONFIG_PPC_MM_SLICES */
 
-/* No separate kernel read-only */
-#define _PAGE_KERNEL_RW     (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+/*
+ * No separate kernel read-only, user access blocked by key
+ */
+#define _PAGE_KERNEL_RW     (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO     _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RWX    (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX    (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
+                             _PAGE_RW | _PAGE_EXEC)
 
 /* Strong Access Ordering */
 #define _PAGE_SAO           (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
@@ -147,7 +150,7 @@
  */
 #define PAGE_PROT_BITS  (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
                          _PAGE_WRITETHRU | _PAGE_4K_PFN | \
-                         _PAGE_USER | _PAGE_ACCESSED | _PAGE_READ | \
+                         _PAGE_PRIVILEGED | _PAGE_ACCESSED | _PAGE_READ | \
                          _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
                          _PAGE_SOFT_DIRTY)
 /*
@@ -169,16 +172,13 @@
  *
  * Note due to the way vm flags are laid out, the bits are XWR
  */
-#define PAGE_NONE       __pgprot(_PAGE_BASE)
-#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
-                                 _PAGE_EXEC)
-#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
-#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ | \
-                                 _PAGE_EXEC)
-#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_READ | \
-                                 _PAGE_EXEC)
+#define PAGE_NONE       __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
+#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
@@ -419,8 +419,8 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
  */
 static inline int pte_protnone(pte_t pte)
 {
-    return (pte_val(pte) &
-        (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+    return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
+        (_PAGE_PRESENT | _PAGE_PRIVILEGED);
 }
 #endif /* CONFIG_NUMA_BALANCING */
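
One subtlety in the hunk above: PAGE_NONE now carries _PAGE_PRIVILEGED, so a
NUMA-balancing protnone page is simply a present PTE that userspace cannot
touch, and pte_protnone() tests exactly that pair of bits. A standalone
restatement of the check (a sketch only; the _PAGE_PRESENT value and the
simplified _PAGE_BASE are illustrative assumptions, not the real definitions):

#include <stdio.h>

#define _PAGE_PRIVILEGED 0x00008UL     /* from the hunk above */
#define _PAGE_PRESENT    0x10000UL     /* illustrative value only */
#define _PAGE_BASE       _PAGE_PRESENT /* simplified for the sketch */

#define PAGE_NONE        (_PAGE_BASE | _PAGE_PRIVILEGED)

/* Mirrors the new pte_protnone(): present but not user-accessible. */
static int pte_protnone(unsigned long ptev)
{
    return (ptev & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
           (_PAGE_PRESENT | _PAGE_PRIVILEGED);
}

int main(void)
{
    printf("PAGE_NONE protnone: %d\n", pte_protnone(PAGE_NONE));     /* 1 */
    printf("user page protnone: %d\n", pte_protnone(_PAGE_PRESENT)); /* 0 */
    return 0;
}

Note that a genuinely privileged kernel mapping has the same bit pattern; the
helper is only meaningful for user-space mappings, under CONFIG_NUMA_BALANCING.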

arch/powerpc/include/asm/book3s/64/pgtable.h
Lines changed: 17 additions & 1 deletion

@@ -187,7 +187,7 @@ extern struct page *pgd_page(pgd_t pgd);
 
 static inline bool pte_user(pte_t pte)
 {
-    return !!(pte_val(pte) & _PAGE_USER);
+    return !(pte_val(pte) & _PAGE_PRIVILEGED);
 }
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -211,6 +211,22 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 }
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
+static inline bool check_pte_access(unsigned long access, unsigned long ptev)
+{
+    /*
+     * Check the _PAGE_RWX and _PAGE_PRESENT bits.
+     */
+    if (access & ~ptev)
+        return false;
+    /*
+     * Check for access to privileged space.
+     */
+    if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
+        return false;
+
+    return true;
+}
+
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
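
The new helper keeps the old subset test (every bit demanded by access must be
present in the PTE) and adds an exact match on the privilege bit, which is what
newly denies a privileged access to a user mapping and vice versa. A standalone
walkthrough (a sketch, not kernel code: _PAGE_READ and _PAGE_PRIVILEGED values
come from the hash.h hunk above; _PAGE_WRITE and _PAGE_PRESENT are assumed
values for illustration):

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_WRITE      0x00002UL /* assumed for the sketch */
#define _PAGE_READ       0x00004UL /* from hash.h */
#define _PAGE_PRIVILEGED 0x00008UL /* from hash.h */
#define _PAGE_PRESENT    0x10000UL /* illustrative value only */

static bool check_pte_access(unsigned long access, unsigned long ptev)
{
    /* Every requested permission (and present) bit must be in the PTE. */
    if (access & ~ptev)
        return false;
    /* The privilege level of the access must match the mapping. */
    if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
        return false;
    return true;
}

int main(void)
{
    unsigned long user_ro = _PAGE_PRESENT | _PAGE_READ;
    unsigned long kern_rw = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
                            _PAGE_PRIVILEGED;

    /* unprivileged read of a user page: allowed */
    printf("%d\n", check_pte_access(_PAGE_PRESENT | _PAGE_READ, user_ro)); /* 1 */
    /* privileged read of a user page: denied (privilege mismatch) */
    printf("%d\n", check_pte_access(_PAGE_PRESENT | _PAGE_READ |
                                    _PAGE_PRIVILEGED, user_ro));           /* 0 */
    /* unprivileged write to a kernel page: denied (privilege mismatch) */
    printf("%d\n", check_pte_access(_PAGE_PRESENT | _PAGE_WRITE, kern_rw)); /* 0 */
    return 0;
}

The old code used only the first test (access & ~ptev); the second test is the
one that enforces the new access-denial semantics described in the commit
message. The fault-path callers below are all converted to this helper.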

arch/powerpc/mm/hash64_4k.c
Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
         if (unlikely(old_pte & _PAGE_BUSY))
             return 0;
         /* If PTE permissions don't match, take page fault */
-        if (unlikely(access & ~old_pte))
+        if (unlikely(!check_pte_access(access, old_pte)))
             return 1;
         /*
          * Try to lock the PTE, add ACCESSED and DIRTY if it was

arch/powerpc/mm/hash64_64k.c
Lines changed: 2 additions & 2 deletions

@@ -69,7 +69,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
         if (unlikely(old_pte & _PAGE_BUSY))
             return 0;
         /* If PTE permissions don't match, take page fault */
-        if (unlikely(access & ~old_pte))
+        if (unlikely(!check_pte_access(access, old_pte)))
             return 1;
         /*
          * Try to lock the PTE, add ACCESSED and DIRTY if it was
@@ -237,7 +237,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
         if (unlikely(old_pte & _PAGE_BUSY))
             return 0;
         /* If PTE permissions don't match, take page fault */
-        if (unlikely(access & ~old_pte))
+        if (unlikely(!check_pte_access(access, old_pte)))
             return 1;
         /*
          * Check if PTE has the cache-inhibit bit set

arch/powerpc/mm/hash_utils_64.c
Lines changed: 10 additions & 6 deletions

@@ -174,7 +174,7 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
      * User area is mapped with PP=0x2 for read/write
      * or PP=0x3 for read-only (including writeable but clean pages).
      */
-    if (pteflags & _PAGE_USER) {
+    if (!(pteflags & _PAGE_PRIVILEGED)) {
         if (pteflags & _PAGE_RWX)
             rflags |= 0x2;
         if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
@@ -1090,7 +1090,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
     /* Pre-check access permissions (will be re-checked atomically
      * in __hash_page_XX but this pre-check is a fast path
      */
-    if (access & ~pte_val(*ptep)) {
+    if (!check_pte_access(access, pte_val(*ptep))) {
         DBG_LOW(" no access !\n");
         rc = 1;
         goto bail;
@@ -1228,12 +1228,16 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
     if (dsisr & DSISR_ISSTORE)
         access |= _PAGE_WRITE;
     /*
-     * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-     * accessing a userspace segment (even from the kernel). We assume
-     * kernel addresses always have the high bit set.
+     * We set _PAGE_PRIVILEGED only when kernel mode accesses
+     * kernel space.
+     *
+     * _PAGE_PRIVILEGED is NOT set
+     * 1) when kernel mode accesses user space
+     * 2) when user space accesses kernel space
      */
+    access |= _PAGE_PRIVILEGED;
     if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
-        access |= _PAGE_USER;
+        access &= ~_PAGE_PRIVILEGED;
 
     if (trap == 0x400)
         access |= _PAGE_EXEC;
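
Spelled out, the new computation in __hash_page() defaults to a privileged
access and then strips the bit for the two user cases; a user-mode access to a
kernel-region address thus fails the privilege match against the privileged
kernel PTE later on, which is the denial the commit message describes. A
compact sketch of the decision (illustrative, not kernel code; the MSR_PR,
REGION_ID and USER_REGION_ID definitions are stand-ins for the real kernel
macros, and _PAGE_PRESENT is an assumed value as in the sketches above):

#include <stdio.h>

#define _PAGE_READ       0x00004UL
#define _PAGE_PRIVILEGED 0x00008UL
#define _PAGE_PRESENT    0x10000UL /* illustrative value only */

#define MSR_PR           0x4000UL  /* problem (user) state, assumed */
#define USER_REGION_ID   0x0UL
#define REGION_ID(ea)    ((unsigned long)(ea) >> 60) /* top nibble = region */

/*
 * mode     region  -> _PAGE_PRIVILEGED in 'access'
 * kernel   kernel  -> set   (privileged access)
 * kernel   user    -> clear (kernel touching user memory)
 * user     any     -> clear (MSR_PR set)
 */
static unsigned long hash_access_flags(unsigned long msr, unsigned long ea)
{
    unsigned long access = _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED;

    if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
        access &= ~_PAGE_PRIVILEGED;
    return access;
}

int main(void)
{
    /* kernel mode, kernel region: privileged */
    printf("%lx\n", hash_access_flags(0, 0xc000000000000000UL));
    /* kernel mode, user region: unprivileged */
    printf("%lx\n", hash_access_flags(0, 0x0000000010000000UL));
    /* user mode: unprivileged */
    printf("%lx\n", hash_access_flags(MSR_PR, 0x0000000010000000UL));
    return 0;
}

The cxl driver change below applies the same default-then-clear pattern.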

arch/powerpc/mm/hugepage-hash64.c
Lines changed: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
         if (unlikely(old_pmd & _PAGE_BUSY))
             return 0;
         /* If PMD permissions don't match, take page fault */
-        if (unlikely(access & ~old_pmd))
+        if (unlikely(!check_pte_access(access, old_pmd)))
             return 1;
         /*
          * Try to lock the PTE, add ACCESSED and DIRTY if it was

arch/powerpc/mm/hugetlbpage-hash64.c
Lines changed: 2 additions & 1 deletion

@@ -50,8 +50,9 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
         if (unlikely(old_pte & _PAGE_BUSY))
             return 0;
         /* If PTE permissions don't match, take page fault */
-        if (unlikely(access & ~old_pte))
+        if (unlikely(!check_pte_access(access, old_pte)))
             return 1;
+
         /* Try to lock the PTE, add ACCESSED and DIRTY if it was
          * a write access */
         new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;

arch/powerpc/mm/hugetlbpage.c
Lines changed: 1 addition & 1 deletion

@@ -1003,7 +1003,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
         end = pte_end;
 
     pte = READ_ONCE(*ptep);
-    mask = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+    mask = _PAGE_PRESENT | _PAGE_READ;
     if (write)
         mask |= _PAGE_WRITE;

arch/powerpc/mm/pgtable.c
Lines changed: 13 additions & 2 deletions

@@ -43,9 +43,20 @@ static inline int is_exec_fault(void)
  */
 static inline int pte_looks_normal(pte_t pte)
 {
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+    if ((pte_val(pte) &
+         (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
+        _PAGE_PRESENT) {
+        if (pte_user(pte))
+            return 1;
+    }
+    return 0;
+#else
     return (pte_val(pte) &
-        (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
-        (_PAGE_PRESENT | _PAGE_USER);
+           (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
+           (_PAGE_PRESENT | _PAGE_USER);
+#endif
 }
 
 static struct page *maybe_pte_to_page(pte_t pte)

arch/powerpc/mm/pgtable_64.c
Lines changed: 12 additions & 3 deletions

@@ -280,8 +280,17 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
     if (flags & _PAGE_WRITE)
         flags |= _PAGE_DIRTY;
 
-    /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-    flags &= ~(_PAGE_USER | _PAGE_EXEC);
+    /* we don't want to let _PAGE_EXEC leak out */
+    flags &= ~_PAGE_EXEC;
+    /*
+     * Force kernel mapping.
+     */
+#if defined(CONFIG_PPC_BOOK3S_64)
+    flags |= _PAGE_PRIVILEGED;
+#else
+    flags &= ~_PAGE_USER;
+#endif
+
 
 #ifdef _PAGE_BAP_SR
     /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
@@ -664,7 +673,7 @@ void pmdp_huge_split_prepare(struct vm_area_struct *vma,
      * the translation is still valid, because we will withdraw
      * pgtable_t after this.
      */
-    pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+    pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
 }

arch/powerpc/platforms/cell/spufs/fault.c
Lines changed: 1 addition & 1 deletion

@@ -141,7 +141,7 @@ int spufs_handle_class1(struct spu_context *ctx)
     /* we must not hold the lock when entering copro_handle_mm_fault */
     spu_release(ctx);
 
-    access = (_PAGE_PRESENT | _PAGE_READ | _PAGE_USER);
+    access = (_PAGE_PRESENT | _PAGE_READ);
     access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
     local_irq_save(flags);
     ret = hash_page(ea, access, 0x300, dsisr);

drivers/misc/cxl/fault.c
Lines changed: 3 additions & 1 deletion

@@ -152,8 +152,10 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
     access = _PAGE_PRESENT | _PAGE_READ;
     if (dsisr & CXL_PSL_DSISR_An_S)
         access |= _PAGE_WRITE;
+
+    access |= _PAGE_PRIVILEGED;
     if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
-        access |= _PAGE_USER;
+        access &= ~_PAGE_PRIVILEGED;
 
     if (dsisr & DSISR_NOHPTE)
         inv_flags |= HPTE_NOHPTE_UPDATE;
