Skip to content

Commit 565977a

Browse files
toshikani authored and KAGA-KOKO committed
x86/mm: Remove pointless checks in vmalloc_fault
vmalloc_fault() sets user's pgd or p4d from the kernel page table. Once it's set, all tables underneath are identical. There is no point of following the same page table with two separate pointers and make sure they see the same with BUG(). Remove the pointless checks in vmalloc_fault(). Also rename the kernel pgd/p4d pointers to pgd_k/p4d_k so that their names are consistent in the file. Suggested-by: Andy Lutomirski <[email protected]> Signed-off-by: Toshi Kani <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: Borislav Petkov <[email protected]> Cc: Gratian Crisan <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 745dd37 commit 565977a

File tree

1 file changed

+17
-39
lines changed

1 file changed

+17
-39
lines changed

arch/x86/mm/fault.c

Lines changed: 17 additions & 39 deletions
Original file line number | Diff line number | Diff line change
@@ -417,11 +417,11 @@ void vmalloc_sync_all(void)
417417
*/
418418
static noinline int vmalloc_fault(unsigned long address)
419419
{
420-
pgd_t *pgd, *pgd_ref;
421-
p4d_t *p4d, *p4d_ref;
422-
pud_t *pud, *pud_ref;
423-
pmd_t *pmd, *pmd_ref;
424-
pte_t *pte, *pte_ref;
420+
pgd_t *pgd, *pgd_k;
421+
p4d_t *p4d, *p4d_k;
422+
pud_t *pud;
423+
pmd_t *pmd;
424+
pte_t *pte;
425425

426426
/* Make sure we are in vmalloc area: */
427427
if (!(address >= VMALLOC_START && address < VMALLOC_END))
@@ -435,73 +435,51 @@ static noinline int vmalloc_fault(unsigned long address)
435435
* case just flush:
436436
*/
437437
pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
438-
pgd_ref = pgd_offset_k(address);
439-
if (pgd_none(*pgd_ref))
438+
pgd_k = pgd_offset_k(address);
439+
if (pgd_none(*pgd_k))
440440
return -1;
441441

442442
if (pgtable_l5_enabled) {
443443
if (pgd_none(*pgd)) {
444-
set_pgd(pgd, *pgd_ref);
444+
set_pgd(pgd, *pgd_k);
445445
arch_flush_lazy_mmu_mode();
446446
} else {
447-
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
447+
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
448448
}
449449
}
450450

451451
/* With 4-level paging, copying happens on the p4d level. */
452452
p4d = p4d_offset(pgd, address);
453-
p4d_ref = p4d_offset(pgd_ref, address);
454-
if (p4d_none(*p4d_ref))
453+
p4d_k = p4d_offset(pgd_k, address);
454+
if (p4d_none(*p4d_k))
455455
return -1;
456456

457457
if (p4d_none(*p4d) && !pgtable_l5_enabled) {
458-
set_p4d(p4d, *p4d_ref);
458+
set_p4d(p4d, *p4d_k);
459459
arch_flush_lazy_mmu_mode();
460460
} else {
461-
BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_ref));
461+
BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
462462
}
463463

464-
/*
465-
* Below here mismatches are bugs because these lower tables
466-
* are shared:
467-
*/
468464
BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
469465

470466
pud = pud_offset(p4d, address);
471-
pud_ref = pud_offset(p4d_ref, address);
472-
if (pud_none(*pud_ref))
467+
if (pud_none(*pud))
473468
return -1;
474469

475-
if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
476-
BUG();
477-
478470
if (pud_large(*pud))
479471
return 0;
480472

481473
pmd = pmd_offset(pud, address);
482-
pmd_ref = pmd_offset(pud_ref, address);
483-
if (pmd_none(*pmd_ref))
474+
if (pmd_none(*pmd))
484475
return -1;
485476

486-
if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
487-
BUG();
488-
489477
if (pmd_large(*pmd))
490478
return 0;
491479

492-
pte_ref = pte_offset_kernel(pmd_ref, address);
493-
if (!pte_present(*pte_ref))
494-
return -1;
495-
496480
pte = pte_offset_kernel(pmd, address);
497-
498-
/*
499-
* Don't use pte_page here, because the mappings can point
500-
* outside mem_map, and the NUMA hash lookup cannot handle
501-
* that:
502-
*/
503-
if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
504-
BUG();
481+
if (!pte_present(*pte))
482+
return -1;
505483

506484
return 0;
507485
}

0 commit comments

Comments (0)