Skip to content

Commit 1f9ad21

Browse files
ruscur authored and mpe committed
powerpc/mm: Implement set_memory() routines
The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX, and are generally useful primitives to have. This implementation is designed to be generic across powerpc's many MMUs. It's possible that this could be optimised to be faster for specific MMUs. This implementation does not handle cases where the caller is attempting to change the mapping of the page it is executing from, or if another CPU is concurrently using the page being altered. These cases likely shouldn't happen, but a more complex implementation with MMU-specific code could safely handle them. On hash, the linear mapping is not kept in the linux pagetable, so this will not change the protection if used on that range. Currently these functions are not used on the linear map so just WARN for now. apply_to_existing_page_range() does not work on huge pages so for now disallow changing the protection of huge pages. [jpn: - Allow set memory functions to be used without Strict RWX - Hash: Disallow certain regions - Have change_page_attr() take function pointers to manipulate ptes - Radix: Add ptesync after set_pte_at()] Signed-off-by: Russell Currey <[email protected]> Signed-off-by: Christophe Leroy <[email protected]> Signed-off-by: Jordan Niethe <[email protected]> Reviewed-by: Daniel Axtens <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 393eff5 commit 1f9ad21

File tree

4 files changed

+135
-1
lines changed

4 files changed

+135
-1
lines changed

arch/powerpc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,7 @@ config PPC
140140
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
141141
select ARCH_HAS_PTE_SPECIAL
142142
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
143+
select ARCH_HAS_SET_MEMORY
143144
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
144145
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
145146
select ARCH_HAS_UACCESS_FLUSHCACHE

arch/powerpc/include/asm/set_memory.h

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SET_MEMORY_H
#define _ASM_POWERPC_SET_MEMORY_H

/*
 * Actions understood by change_memory_attr().  An anonymous enum rather
 * than #define keeps the constants typed, block-scoped and visible to
 * debuggers; the numeric values are unchanged, so the switch in
 * change_page_attr() and all existing callers are unaffected.
 */
enum {
	SET_MEMORY_RO = 0,
	SET_MEMORY_RW = 1,
	SET_MEMORY_NX = 2,
	SET_MEMORY_X  = 3,
};

/*
 * Apply @action (one of SET_MEMORY_*) to @numpages pages of kernel
 * virtual address space starting at @addr.  Returns 0 on success or a
 * negative errno.  Defined in arch/powerpc/mm/pageattr.c.
 */
int change_memory_attr(unsigned long addr, int numpages, long action);

/* Make [addr, addr + numpages * PAGE_SIZE) read-only. */
static inline int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
}

/* Make the range writable. */
static inline int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
}

/* Remove execute permission from the range. */
static inline int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
}

/* Grant execute permission on the range. */
static inline int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_X);
}

#endif /* _ASM_POWERPC_SET_MEMORY_H */

arch/powerpc/mm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
77

8-
obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
8+
obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
99
init_$(BITS).o pgtable_$(BITS).o \
1010
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
1111
init-common.o mmu_context.o drmem.o \

arch/powerpc/mm/pageattr.c

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
/*
4+
* MMU-generic set_memory implementation for powerpc
5+
*
6+
* Copyright 2019-2021, IBM Corporation.
7+
*/
8+
9+
#include <linux/mm.h>
10+
#include <linux/vmalloc.h>
11+
#include <linux/set_memory.h>
12+
13+
#include <asm/mmu.h>
14+
#include <asm/page.h>
15+
#include <asm/pgtable.h>
16+
17+
18+
/*
 * Updates the attributes of a page in three steps:
 *
 * 1. invalidate the page table entry
 * 2. flush the TLB
 * 3. install the new entry with the updated attributes
 *
 * Invalidating the pte means there are situations where this will not work
 * when in theory it should.
 * For example:
 * - removing write from page whilst it is being executed
 * - setting a page read-only whilst it is being read by another CPU
 *
 * NOTE(review): between steps 1 and 3 a concurrent access from another
 * CPU (or execution from the page itself) hits a cleared PTE; callers
 * are presumed to ensure the range is not in active use while it is
 * being changed — confirm against callers.
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	/* Action is passed through apply_to_existing_page_range()'s cookie. */
	long action = (long)data;
	pte_t pte;

	/* Serialise against other kernel page table updates. */
	spin_lock(&init_mm.page_table_lock);

	/* invalidate the PTE so it's safe to modify */
	pte = ptep_get_and_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* modify the PTE bits as desired, then apply */
	switch (action) {
	case SET_MEMORY_RO:
		pte = pte_wrprotect(pte);
		break;
	case SET_MEMORY_RW:
		/* Mark dirty as well as writable so a later store won't fault. */
		pte = pte_mkwrite(pte_mkdirty(pte));
		break;
	case SET_MEMORY_NX:
		pte = pte_exprotect(pte);
		break;
	case SET_MEMORY_X:
		pte = pte_mkexec(pte);
		break;
	default:
		/* Unknown action: warn once and reinstall the PTE unchanged. */
		WARN_ON_ONCE(1);
		break;
	}

	set_pte_at(&init_mm, addr, ptep, pte);

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");
	spin_unlock(&init_mm.page_table_lock);

	/* Always 0: a per-page failure is not reported to the range walker. */
	return 0;
}
71+
72+
int change_memory_attr(unsigned long addr, int numpages, long action)
73+
{
74+
unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
75+
unsigned long size = numpages * PAGE_SIZE;
76+
77+
if (!numpages)
78+
return 0;
79+
80+
if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
81+
is_vm_area_hugepages((void *)addr)))
82+
return -EINVAL;
83+
84+
#ifdef CONFIG_PPC_BOOK3S_64
85+
/*
86+
* On hash, the linear mapping is not in the Linux page table so
87+
* apply_to_existing_page_range() will have no effect. If in the future
88+
* the set_memory_* functions are used on the linear map this will need
89+
* to be updated.
90+
*/
91+
if (!radix_enabled()) {
92+
int region = get_region_id(addr);
93+
94+
if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
95+
return -EINVAL;
96+
}
97+
#endif
98+
99+
return apply_to_existing_page_range(&init_mm, start, size,
100+
change_page_attr, (void *)action);
101+
}

0 commit comments

Comments
 (0)