@@ -37,12 +37,14 @@ struct cpa_data {
 	unsigned long	numpages;
 	int		flags;
 	unsigned long	pfn;
-	unsigned	force_split		: 1;
+	unsigned	force_split		: 1,
+			force_static_prot	: 1;
 	int		curpage;
 	struct page	**pages;
 };
 
 enum cpa_warn {
+	CPA_CONFLICT,
 	CPA_PROTECT,
 	CPA_DETECT,
 };
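
Note: the two one-bit flags share a single bitfield declaration, so the new force_static_prot flag costs no extra space in struct cpa_data. A standalone userspace sketch of the idiom (names illustrative, not kernel code):

#include <stdio.h>

/*
 * Two one-bit flags declared in a single bitfield statement share one
 * "unsigned" storage unit, so adding the second flag does not enlarge
 * the struct.
 */
struct flags_demo {
	unsigned force_split	   : 1,
		 force_static_prot : 1;
};

int main(void)
{
	struct flags_demo d = { .force_split = 1 };

	d.force_static_prot = 1;
	printf("sizeof = %zu, split = %u, static_prot = %u\n",
	       sizeof(d), d.force_split, d.force_static_prot);
	return 0;
}
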
@@ -501,6 +503,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
 				  unsigned long pfn, const char *txt)
 {
 	static const char *lvltxt[] = {
+		[CPA_CONFLICT]	= "conflict",
 		[CPA_PROTECT]	= "protect",
 		[CPA_DETECT]	= "detect",
 	};
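
Note: lvltxt[] uses designated initializers indexed by enum cpa_warn, so the string table stays in sync with the enum and the new warning level is a one-line addition in each place. A self-contained sketch of the construct:

#include <stdio.h>

/*
 * Designated initializers keep the table aligned with the enum even if
 * enumerators are added or reordered.
 */
enum cpa_warn { CPA_CONFLICT, CPA_PROTECT, CPA_DETECT };

static const char *lvltxt[] = {
	[CPA_CONFLICT]	= "conflict",
	[CPA_PROTECT]	= "protect",
	[CPA_DETECT]	= "detect",
};

int main(void)
{
	printf("%s\n", lvltxt[CPA_CONFLICT]);	/* prints "conflict" */
	return 0;
}
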
@@ -743,7 +746,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 				     struct cpa_data *cpa)
 {
 	unsigned long numpages, pmask, psize, lpaddr, addr, pfn, old_pfn;
-	pgprot_t old_prot, new_prot, req_prot;
+	pgprot_t old_prot, new_prot, req_prot, chk_prot;
 	pte_t new_pte, old_pte, *tmp;
 	enum pg_level level;
 	int i;
@@ -819,6 +822,23 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	lpaddr = address & pmask;
 	numpages = psize >> PAGE_SHIFT;
 
+	/*
+	 * Sanity check that the existing mapping is correct versus the static
+	 * protections. static_protections() guards against !PRESENT, so no
+	 * extra conditional required here.
+	 */
+	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
+				      CPA_CONFLICT);
+
+	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
+		/*
+		 * Split the large page and tell the split code to
+		 * enforce static protections.
+		 */
+		cpa->force_static_prot = 1;
+		return 1;
+	}
+
 	/*
	 * Make sure that the requested pgprot does not violate the static
	 * protections. Check the full large page whether one of the pages
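
Note: the added block recomputes the installed protections under the static rules and compares; any mismatch means the existing large mapping is already inconsistent, so it must be split and fixed up page by page. A userspace stand-in for that decision (clamp_static_prot() is a hypothetical substitute for static_protections(), and the bit values are illustrative only):

#include <stdio.h>

typedef unsigned long pgprotval_t;

#define _PAGE_RW	0x002UL	/* illustrative, mirrors the x86 RW bit */

/* Hypothetical stand-in: clamp protections of a range that must stay RO. */
static pgprotval_t clamp_static_prot(pgprotval_t prot)
{
	return prot & ~_PAGE_RW;
}

int main(void)
{
	pgprotval_t old_prot = 0x063;	/* installed protections, RW set */
	pgprotval_t chk_prot = clamp_static_prot(old_prot);

	/* Mirrors the WARN_ON_ONCE() above: a mismatch flags a conflict. */
	if (chk_prot != old_prot)
		printf("conflict: split and enforce static protections\n");
	return 0;
}
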
@@ -828,8 +848,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	new_prot = static_protections(req_prot, address, pfn, 1, CPA_DETECT);
 	pfn = old_pfn;
 	for (i = 0, addr = lpaddr; i < numpages; i++, addr += PAGE_SIZE, pfn++) {
-		pgprot_t chk_prot = static_protections(req_prot, addr, pfn, 1,
-						       CPA_DETECT);
+		chk_prot = static_protections(req_prot, addr, pfn, 1,
+					      CPA_DETECT);
 		cpa_inc_4k_checked();
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			return 1;
@@ -871,15 +891,50 @@ static int should_split_large_page(pte_t *kpte, unsigned long address,
 	return do_split;
 }
 
+static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
+			  pgprot_t ref_prot, unsigned long address,
+			  unsigned long size)
+{
+	unsigned int npg = PFN_DOWN(size);
+	pgprot_t prot;
+
+	/*
+	 * If should_split_large_page() discovered an inconsistent mapping,
+	 * remove the invalid protection in the split mapping.
+	 */
+	if (!cpa->force_static_prot)
+		goto set;
+
+	prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
+
+	if (pgprot_val(prot) == pgprot_val(ref_prot))
+		goto set;
+
+	/*
+	 * If this is splitting a PMD, fix it up. PUD splits cannot be
+	 * fixed trivially as that would require to rescan the newly
+	 * installed PMD mappings after returning from split_large_page()
+	 * so an eventual further split can allocate the necessary PTE
+	 * pages. Warn for now and revisit it in case this actually
+	 * happens.
+	 */
+	if (size == PAGE_SIZE)
+		ref_prot = prot;
+	else
+		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
+set:
+	set_pte(pte, pfn_pte(pfn, ref_prot));
+}
+
 static int
 __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 		   struct page *base)
 {
+	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
 	pte_t *pbase = (pte_t *)page_address(base);
-	unsigned long ref_pfn, pfn, pfninc = 1;
 	unsigned int i, level;
-	pte_t *tmp;
 	pgprot_t ref_prot;
+	pte_t *tmp;
 
 	spin_lock(&pgd_lock);
 	/*
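
Note: split_set_pte() can only fix up PMD splits because there each new PTE covers a single page; after a PUD split each new PMD still covers 512 pages, so a partially protected range would need yet another split, hence the pr_warn_once(). A sketch of the npg arithmetic behind that asymmetry (constants assume x86-64 4K pages):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

/*
 * npg is the number of 4K pages covered by one new entry: 1 for a
 * PMD split (PTE entries), 512 for a PUD split (PMD entries).
 */
int main(void)
{
	printf("PMD split: npg per entry = %lu\n", PFN_DOWN(PAGE_SIZE));
	printf("PUD split: npg per entry = %lu\n", PFN_DOWN(PMD_SIZE));
	return 0;
}
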
@@ -902,15 +957,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 		 * PAT bit to correct position.
 		 */
 		ref_prot = pgprot_large_2_4k(ref_prot);
-
 		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
+		lpaddr = address & PMD_MASK;
+		lpinc = PAGE_SIZE;
 		break;
 
 	case PG_LEVEL_1G:
 		ref_prot = pud_pgprot(*(pud_t *)kpte);
 		ref_pfn = pud_pfn(*(pud_t *)kpte);
 		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
-
+		lpaddr = address & PUD_MASK;
+		lpinc = PMD_SIZE;
 		/*
 		 * Clear the PSE flags if the PRESENT flag is not set
 		 * otherwise pmd_present/pmd_huge will return true
@@ -931,8 +988,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	 * Get the target pfn from the original entry:
 	 */
 	pfn = ref_pfn;
-	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
-		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
+		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
 
 	if (virt_addr_valid(address)) {
 		unsigned long pfn = PFN_DOWN(__pa(address));
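
Note: the split loop now advances the virtual address in lockstep with the pfn, so split_set_pte() knows the exact range each new entry maps. A standalone sketch of the stepping for a PMD split (values illustrative):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	512

/*
 * pfn and the virtual address advance together; for a PMD split both
 * pfninc and lpinc correspond to one 4K page per new PTE.
 */
int main(void)
{
	unsigned long pfn = 0x200, pfninc = 1;
	unsigned long lpaddr = 0x200000, lpinc = PAGE_SIZE;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc) {
		if (i < 2 || i == PTRS_PER_PTE - 1)
			printf("entry %3u: pfn=0x%lx addr=0x%lx\n",
			       i, pfn, lpaddr);
	}
	return 0;
}
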