
Commit 8f62c88

hansendc authored and Ingo Molnar committed
x86/mm/pkeys: Add arch-specific VMA protection bits
Lots of things seem to do:

        vma->vm_page_prot = vm_get_page_prot(flags);

and the ptes get created right from things we pull out of
->vm_page_prot. So it is very convenient if we can store the
protection key in flags and vm_page_prot, just like the existing
permission bits (_PAGE_RW/PRESENT). It greatly reduces the amount of
plumbing and arch-specific hacking we have to do in generic code.

This also takes the new PROT_PKEY{0,1,2,3} flags and turns *those*
into VM_ flags for vma->vm_flags.

The protection key values are stored in 4 places:

        1. "prot" argument to system calls
        2. vma->vm_flags, filled from the mmap "prot"
        3. vma->vm_page_prot, filled from vma->vm_flags
        4. the PTE itself

The pseudocode for these four steps is as follows:

        mmap(PROT_PKEY*)
        vma->vm_flags = ... | arch_calc_vm_prot_bits(mmap_prot);
        vma->vm_page_prot = ... | arch_vm_get_page_prot(vma->vm_flags);
        pte = pfn | vma->vm_page_prot

Note that this provides a new definition for x86:

        arch_vm_get_page_prot()

Signed-off-by: Dave Hansen <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
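To make the four steps concrete, here is an illustrative sketch, not code from this patch, that chains the bits the patch defines. sketch_pkey_pte() is a hypothetical helper name, and a real caller would start from the mmap "prot" argument rather than a raw key value:

/* Illustration only, not part of this patch: */
static pte_t sketch_pkey_pte(unsigned long pfn, u16 pkey)
{
        unsigned long vm_flags;
        pgprot_t prot;

        /* Step 2: encode the 4-bit key as it would sit in vma->vm_flags: */
        vm_flags = ((unsigned long)pkey << VM_PKEY_SHIFT) &
                   (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3);

        /* Step 3: fold the key into the page protections: */
        prot = arch_vm_get_page_prot(vm_flags);

        /* Step 4: the key lands in the pte next to the pfn: */
        return pfn_pte(pfn, prot);
}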
1 parent: 63c17fb

4 files changed, 44 insertions(+), 2 deletions(-)

arch/x86/include/asm/mmu_context.h

Lines changed: 11 additions & 0 deletions
@@ -275,4 +275,15 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
 	mpx_notify_unmap(mm, vma, start, end);
 }
 
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+	u16 pkey = 0;
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
+				      VM_PKEY_BIT2 | VM_PKEY_BIT3;
+	pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
+#endif
+	return pkey;
+}
+
 #endif /* _ASM_X86_MMU_CONTEXT_H */
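A hedged usage sketch for the new helper (hypothetical caller, not part of this diff): because the #ifdef lives inside vma_pkey(), generic code can call it unconditionally and simply sees key 0 when pkeys are compiled out:

/* Hypothetical caller, illustration only: */
static void sketch_report_pkey(struct vm_area_struct *vma)
{
        int pkey = vma_pkey(vma);       /* 0 when pkeys are compiled out */

        pr_info("vma %p is covered by protection key %d\n", vma, pkey);
}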

arch/x86/include/asm/pgtable_types.h

Lines changed: 10 additions & 2 deletions
@@ -115,7 +115,12 @@
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
 			 _PAGE_DIRTY)
 
-/* Set of bits not changed in pte_modify */
+/*
+ * Set of bits not changed in pte_modify. The pte's
+ * protection key is treated like _PAGE_RW, for
+ * instance, and is *not* included in this mask since
+ * pte_modify() does modify it.
+ */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
 			 _PAGE_SOFT_DIRTY)
@@ -231,7 +236,10 @@ enum page_cache_mode {
 /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
 #define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)
 
-/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
+/*
+ * Extracts the flags from a (pte|pmd|pud|pgd)val_t
+ * This includes the protection key value.
+ */
 #define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
 
 typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
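The new _PAGE_CHG_MASK comment is best read against how pte_modify() applies the mask: bits inside the mask are preserved from the old pte, and everything outside it is taken from the new protections. A simplified sketch of that logic (paraphrasing arch/x86/include/asm/pgtable.h, not part of this diff):

/* Simplified sketch of pte_modify()'s use of the mask: */
static inline pte_t sketch_pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        val &= _PAGE_CHG_MASK;          /* keep pfn, accessed, dirty, ... */
        val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
                                        /* take RW, NX, pkey, ... from newprot */
        return __pte(val);
}

Leaving the protection key bits out of the mask is what lets an mprotect()-style update install a new key.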

arch/x86/include/uapi/asm/mman.h

Lines changed: 16 additions & 0 deletions
@@ -6,6 +6,22 @@
 #define MAP_HUGE_2MB	(21 << MAP_HUGE_SHIFT)
 #define MAP_HUGE_1GB	(30 << MAP_HUGE_SHIFT)
 
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+/*
+ * Take the 4 protection key bits out of the vma->vm_flags
+ * value and turn them in to the bits that we can put in
+ * to a pte.
+ *
+ * Only override these if Protection Keys are available
+ * (which is only on 64-bit).
+ */
+#define arch_vm_get_page_prot(vm_flags)	__pgprot(	\
+		((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) |	\
+		((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) |	\
+		((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) |	\
+		((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
+#endif
+
 #include <asm-generic/mman.h>
 
 #endif /* _ASM_X86_MMAN_H */
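For context on how this override is consumed: the generic vm_get_page_prot() ORs the arch hook's result into the baseline protections, roughly like this (paraphrased from mm/mmap.c, not part of this diff):

/* Paraphrased from mm/mmap.c: baseline prot bits | arch pkey bits: */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return __pgprot(pgprot_val(protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
                        pgprot_val(arch_vm_get_page_prot(vm_flags)));
}

On configs without the override, the generic fallback defines arch_vm_get_page_prot() as __pgprot(0), so it contributes nothing.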

include/linux/mm.h

Lines changed: 7 additions & 0 deletions
@@ -183,6 +183,13 @@ extern unsigned int kobjsize(const void *objp);
 
 #if defined(CONFIG_X86)
 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
+#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
+# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
+# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
+# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
+#endif
 #elif defined(CONFIG_PPC)
 # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 #elif defined(CONFIG_PARISC)
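Since the four key bits are consecutive VM_HIGH_ARCH_* flags starting at VM_PKEY_SHIFT, a key round-trips through vm_flags with one shift and mask. A hypothetical encoder mirroring the vma_pkey() decoder above (illustration only, not part of this diff):

/* Hypothetical inverse of vma_pkey(), illustration only: */
static inline unsigned long sketch_pkey_to_vm_flags(u16 pkey)
{
        /* The shift and bit 0 must line up for the round-trip to work: */
        BUILD_BUG_ON(VM_PKEY_BIT0 != (1UL << VM_PKEY_SHIFT));

        /* Only 4 key bits exist; mask before shifting into place: */
        return ((unsigned long)(pkey & 0xf)) << VM_PKEY_SHIFT;
}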
