// SPDX-License-Identifier: GPL-2.0

/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/*
 * Updates the attributes of a page in three steps:
 *
 * 1. invalidate the page table entry
 * 2. flush the TLB
 * 3. install the new entry with the updated attributes
 *
 * Invalidating the pte means there are situations where this will not work
 * when in theory it should. For example:
 * - removing write permission from a page while it is being executed
 * - setting a page read-only while it is being read by another CPU
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;
	pte_t pte;

	spin_lock(&init_mm.page_table_lock);

	/* invalidate the PTE so it's safe to modify */
	pte = ptep_get_and_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* modify the PTE bits as desired, then apply */
	switch (action) {
	case SET_MEMORY_RO:
		pte = pte_wrprotect(pte);
		break;
	case SET_MEMORY_RW:
		pte = pte_mkwrite(pte_mkdirty(pte));
		break;
	case SET_MEMORY_NX:
		pte = pte_exprotect(pte);
		break;
	case SET_MEMORY_X:
		pte = pte_mkexec(pte);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	set_pte_at(&init_mm, addr, ptep, pte);

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
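
/*
 * Usage sketch (hypothetical caller, not part of this file): marking a
 * vmalloc'ed page read-only goes through a set_memory_ro() wrapper, which
 * drives change_page_attr() once per page via change_memory_attr() below:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 1);
 */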

int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
			return -EINVAL;
	}
#endif

	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}
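
/*
 * For reference, the set_memory_* entry points are expected to be thin
 * wrappers around change_memory_attr(). A minimal sketch, assuming the
 * SET_MEMORY_* action values from <asm/set_memory.h>:
 *
 *	static inline int set_memory_ro(unsigned long addr, int numpages)
 *	{
 *		return change_memory_attr(addr, numpages, SET_MEMORY_RO);
 *	}
 */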