@@ -24,6 +24,7 @@
 
 /* These actually do the work of building the kernel identity maps. */
 #include <linux/pgtable.h>
+#include <asm/cmpxchg.h>
 #include <asm/trap_pf.h>
 #include <asm/trapnr.h>
 #include <asm/init.h>
@@ -165,6 +166,138 @@ void finalize_identity_maps(void)
 	write_cr3(top_level_pgt);
 }
 
+static pte_t *split_large_pmd(struct x86_mapping_info *info,
+			      pmd_t *pmdp, unsigned long __address)
+{
+	unsigned long page_flags;
+	unsigned long address;
+	pte_t *pte;
+	pmd_t pmd;
+	int i;
+
+	pte = (pte_t *)info->alloc_pgt_page(info->context);
+	if (!pte)
+		return NULL;
+
+	address = __address & PMD_MASK;
+	/* No large page - clear PSE flag */
+	page_flags = info->page_flag & ~_PAGE_PSE;
+
+	/* Populate the PTEs */
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		set_pte(&pte[i], __pte(address | page_flags));
+		address += PAGE_SIZE;
+	}
+
+	/*
+	 * Ideally we would clear the large PMD first and do a TLB
+	 * flush before writing the new PMD. But the 2M range of the
+	 * PMD might contain the code we execute and/or the stack
+	 * we are on, so we can't do that. It is safe here anyway,
+	 * because we go from large to small mappings and we are the
+	 * only user of the page-table, so there is no chance of a
+	 * TLB multihit.
+	 */
+	pmd = __pmd((unsigned long)pte | info->kernpg_flag);
+	set_pmd(pmdp, pmd);
+	/* Flush TLB to establish the new PMD */
+	write_cr3(top_level_pgt);
+
+	return pte + pte_index(__address);
+}
+
+static void clflush_page(unsigned long address)
+{
+	unsigned int flush_size;
+	char *cl, *start, *end;
+
+	/*
+	 * Hardcode cl-size to 64 - CPUID can't be used here because that might
+	 * cause another #VC exception and the GHCB is not ready to use yet.
+	 */
+	flush_size = 64;
+	start = (char *)(address & PAGE_MASK);
+	end = start + PAGE_SIZE;
+
+	/*
+	 * First make sure there are no pending writes on the cache-lines to
+	 * flush.
+	 */
+	asm volatile("mfence" : : : "memory");
+
+	for (cl = start; cl != end; cl += flush_size)
+		clflush(cl);
+}
+
+static int set_clr_page_flags(struct x86_mapping_info *info,
+			      unsigned long address,
+			      pteval_t set, pteval_t clr)
+{
+	pgd_t *pgdp = (pgd_t *)top_level_pgt;
+	p4d_t *p4dp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep, pte;
+
+	/*
+	 * First make sure there is a PMD mapping for 'address'.
+	 * It should already exist, but keep things generic.
+	 *
+	 * To map the page, just read from it and let the #PF handler fault it
+	 * in if there is no mapping yet. add_identity_map() can't be called
+	 * here because that would unconditionally map the address on PMD
+	 * level, destroying any PTE-level mappings that might already exist.
+	 * Use assembly here so the access won't be optimized away.
+	 */
+	asm volatile("mov %[address], %%r9"
+		     :: [address] "g" (*(unsigned long *)address)
+		     : "r9", "memory");
+
+	/*
+	 * The page is mapped at least with PMD size - so skip checks and walk
+	 * directly to the PMD.
+	 */
+	p4dp = p4d_offset(pgdp, address);
+	pudp = pud_offset(p4dp, address);
+	pmdp = pmd_offset(pudp, address);
+
+	if (pmd_large(*pmdp))
+		ptep = split_large_pmd(info, pmdp, address);
+	else
+		ptep = pte_offset_kernel(pmdp, address);
+
+	if (!ptep)
+		return -ENOMEM;
+
+	/*
+	 * Changing encryption attributes of a page requires flushing it from
+	 * the caches.
+	 */
+	if ((set | clr) & _PAGE_ENC)
+		clflush_page(address);
+
+	/* Update PTE */
+	pte = *ptep;
+	pte = pte_set_flags(pte, set);
+	pte = pte_clear_flags(pte, clr);
+	set_pte(ptep, pte);
+
+	/* Flush TLB after changing encryption attribute */
+	write_cr3(top_level_pgt);
+
+	return 0;
+}
+
+int set_page_decrypted(unsigned long address)
+{
+	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
+}
+
+int set_page_encrypted(unsigned long address)
+{
+	return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
+}
+
 static void do_pf_error(const char *msg, unsigned long error_code,
 			unsigned long address, unsigned long ip)
 {
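
A quick sanity check of the index arithmetic in split_large_pmd above, with the standard x86-64 4K-paging constants hardcoded (a standalone illustration, not part of the patch):

#include <stdio.h>

/* Standard x86-64 4K-paging constants, hardcoded for this demo. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PMD	512

int main(void)
{
	unsigned long addr = 0x1234567UL;

	/* Base of the 2 MiB region the large PMD mapped. */
	printf("region base: %#lx\n", addr & PMD_MASK);
	/* 512 PTEs of 4 KiB re-cover exactly that 2 MiB region. */
	printf("region size: %#lx\n", (unsigned long)PTRS_PER_PMD << PAGE_SHIFT);
	/* pte_index(): which of the 512 new PTEs maps 'addr'. */
	printf("pte index:   %lu\n", (addr >> PAGE_SHIFT) & (PTRS_PER_PMD - 1));

	return 0;
}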
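The mfence-before-clflush pattern in clflush_page can be reproduced with compiler builtins; here is a user-space sketch assuming the same hardcoded 64-byte line size (__sync_synchronize and __builtin_ia32_clflush are the GCC/Clang counterparts of the kernel's inline mfence and clflush() helper):

/* Illustrative user-space variant of the flush loop in clflush_page. */
void flush_page_lines(void *page)
{
	char *cl  = page;
	char *end = cl + 4096;

	/* Make pending stores globally visible before flushing (mfence). */
	__sync_synchronize();

	/* Flush the page one 64-byte cache-line at a time. */
	for (; cl != end; cl += 64)
		__builtin_ia32_clflush(cl);
}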
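For context, a later consumer of these helpers might clear the C-bit on a page it has to share with the hypervisor (for SEV-ES, the GHCB used by the #VC handler must be mapped unencrypted). The function and page names below are illustrative assumptions, not part of this diff:

/* Hypothetical caller: make one page-aligned buffer shared (unencrypted). */
static char boot_ghcb_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static bool early_setup_ghcb(void)
{
	/* Clear _PAGE_ENC so the hypervisor can read the page. */
	if (set_page_decrypted((unsigned long)boot_ghcb_page))
		return false;

	/* The mapping is now unencrypted - start from known contents. */
	memset(boot_ghcb_page, 0, PAGE_SIZE);

	return true;
}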