Skip to content

Commit f61c5ba

Browse files
committed
x86/mm/cpa: Add sanity check for existing mappings
With the range check it is possible to do a quick verification that the current mapping is correct vs. the static protection areas.

In case an incorrect mapping is detected, a warning is emitted and the large page is split up. If the large page is a 2M page, then the split code is forced to check the static protections for the PTE entries to fix up the incorrectness. For 1G pages this can't be done easily because that would require either finding the offending 2M areas before the split or afterwards. For now just warn about that case and revisit it when reported.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Bin Yang <[email protected]>
Cc: Mark Gross <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 69c31e6 commit f61c5ba

File tree

1 file changed

+67
-10
lines changed

1 file changed

+67
-10
lines changed

arch/x86/mm/pageattr.c

Lines changed: 67 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -37,12 +37,14 @@ struct cpa_data {
3737
unsigned long numpages;
3838
int flags;
3939
unsigned long pfn;
40-
unsigned force_split : 1;
40+
unsigned force_split : 1,
41+
force_static_prot : 1;
4142
int curpage;
4243
struct page **pages;
4344
};
4445

4546
enum cpa_warn {
47+
CPA_CONFLICT,
4648
CPA_PROTECT,
4749
CPA_DETECT,
4850
};
@@ -501,6 +503,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
501503
unsigned long pfn, const char *txt)
502504
{
503505
static const char *lvltxt[] = {
506+
[CPA_CONFLICT] = "conflict",
504507
[CPA_PROTECT] = "protect",
505508
[CPA_DETECT] = "detect",
506509
};
@@ -743,7 +746,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
743746
struct cpa_data *cpa)
744747
{
745748
unsigned long numpages, pmask, psize, lpaddr, addr, pfn, old_pfn;
746-
pgprot_t old_prot, new_prot, req_prot;
749+
pgprot_t old_prot, new_prot, req_prot, chk_prot;
747750
pte_t new_pte, old_pte, *tmp;
748751
enum pg_level level;
749752
int i;
@@ -819,6 +822,23 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
819822
lpaddr = address & pmask;
820823
numpages = psize >> PAGE_SHIFT;
821824

825+
/*
826+
* Sanity check that the existing mapping is correct versus the static
827+
* protections. static_protections() guards against !PRESENT, so no
828+
* extra conditional required here.
829+
*/
830+
chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
831+
CPA_CONFLICT);
832+
833+
if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
834+
/*
835+
* Split the large page and tell the split code to
836+
* enforce static protections.
837+
*/
838+
cpa->force_static_prot = 1;
839+
return 1;
840+
}
841+
822842
/*
823843
* Make sure that the requested pgprot does not violate the static
824844
* protections. Check the full large page whether one of the pages
@@ -828,8 +848,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
828848
new_prot = static_protections(req_prot, address, pfn, 1, CPA_DETECT);
829849
pfn = old_pfn;
830850
for (i = 0, addr = lpaddr; i < numpages; i++, addr += PAGE_SIZE, pfn++) {
831-
pgprot_t chk_prot = static_protections(req_prot, addr, pfn, 1,
832-
CPA_DETECT);
851+
chk_prot = static_protections(req_prot, addr, pfn, 1,
852+
CPA_DETECT);
833853
cpa_inc_4k_checked();
834854
if (pgprot_val(chk_prot) != pgprot_val(new_prot))
835855
return 1;
@@ -871,15 +891,50 @@ static int should_split_large_page(pte_t *kpte, unsigned long address,
871891
return do_split;
872892
}
873893

894+
static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
895+
pgprot_t ref_prot, unsigned long address,
896+
unsigned long size)
897+
{
898+
unsigned int npg = PFN_DOWN(size);
899+
pgprot_t prot;
900+
901+
/*
902+
* If should_split_large_page() discovered an inconsistent mapping,
903+
* remove the invalid protection in the split mapping.
904+
*/
905+
if (!cpa->force_static_prot)
906+
goto set;
907+
908+
prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
909+
910+
if (pgprot_val(prot) == pgprot_val(ref_prot))
911+
goto set;
912+
913+
/*
914+
* If this is splitting a PMD, fix it up. PUD splits cannot be
915+
* fixed trivially as that would require to rescan the newly
916+
* installed PMD mappings after returning from split_large_page()
917+
* so an eventual further split can allocate the necessary PTE
918+
* pages. Warn for now and revisit it in case this actually
919+
* happens.
920+
*/
921+
if (size == PAGE_SIZE)
922+
ref_prot = prot;
923+
else
924+
pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
925+
set:
926+
set_pte(pte, pfn_pte(pfn, ref_prot));
927+
}
928+
874929
static int
875930
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
876931
struct page *base)
877932
{
933+
unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
878934
pte_t *pbase = (pte_t *)page_address(base);
879-
unsigned long ref_pfn, pfn, pfninc = 1;
880935
unsigned int i, level;
881-
pte_t *tmp;
882936
pgprot_t ref_prot;
937+
pte_t *tmp;
883938

884939
spin_lock(&pgd_lock);
885940
/*
@@ -902,15 +957,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
902957
* PAT bit to correct position.
903958
*/
904959
ref_prot = pgprot_large_2_4k(ref_prot);
905-
906960
ref_pfn = pmd_pfn(*(pmd_t *)kpte);
961+
lpaddr = address & PMD_MASK;
962+
lpinc = PAGE_SIZE;
907963
break;
908964

909965
case PG_LEVEL_1G:
910966
ref_prot = pud_pgprot(*(pud_t *)kpte);
911967
ref_pfn = pud_pfn(*(pud_t *)kpte);
912968
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
913-
969+
lpaddr = address & PUD_MASK;
970+
lpinc = PMD_SIZE;
914971
/*
915972
* Clear the PSE flags if the PRESENT flag is not set
916973
* otherwise pmd_present/pmd_huge will return true
@@ -931,8 +988,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
931988
* Get the target pfn from the original entry:
932989
*/
933990
pfn = ref_pfn;
934-
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
935-
set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
991+
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
992+
split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
936993

937994
if (virt_addr_valid(address)) {
938995
unsigned long pfn = PFN_DOWN(__pa(address));

0 commit comments

Comments
 (0)