@@ -485,13 +485,22 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	/*
 	 * We are safe now. Check whether the new pgprot is the same:
+	 * Convert protection attributes to 4k-format, as cpa->mask* are set
+	 * up accordingly.
 	 */
 	old_pte = *kpte;
-	old_prot = req_prot = pte_pgprot(old_pte);
+	old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
+	/*
+	 * req_prot is in format of 4k pages. It must be converted to large
+	 * page format: the caching mode includes the PAT bit located at
+	 * different bit positions in the two formats.
+	 */
+	req_prot = pgprot_4k_2_large(req_prot);
+
 	/*
	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
	 * set otherwise pmd_present/pmd_huge will return true even on
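
The two helpers used above, pgprot_large_2_4k() and pgprot_4k_2_large(), are
introduced alongside this change in arch/x86/include/asm/pgtable_types.h. As
a minimal sketch of their effect, assuming _PAGE_BIT_PAT is bit 7 (the 4k
PTE format) and _PAGE_BIT_PAT_LARGE is bit 12 (the 2M/1G entry format, where
bit 7 holds _PAGE_PSE instead), each helper clears both PAT positions and
moves the PAT bit to wherever the target format expects it:

static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	/* relocate PAT from bit 7 (4k) up to bit 12 (large) */
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}

static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprotval_t val = pgprot_val(pgprot);
	pgprot_t new;

	/* relocate PAT from bit 12 (large) down to bit 7 (4k) */
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >> (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}

All other protection bits occupy the same positions in both formats, so only
the PAT bit has to move.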
@@ -585,13 +594,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 
 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
-	/*
-	 * If we ever want to utilize the PAT bit, we need to
-	 * update this function to make sure it's converted from
-	 * bit 12 to bit 7 when we cross from the 2MB level to
-	 * the 4K level:
-	 */
-	WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
+
+	/* promote PAT bit to correct position */
+	if (level == PG_LEVEL_2M)
+		ref_prot = pgprot_large_2_4k(ref_prot);
 
 #ifdef CONFIG_X86_64
 	if (level == PG_LEVEL_1G) {
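
The level check matters because 2M (PMD) and 1G (PUD) entries share the same
large format with PAT at bit 12, so a 1G-to-2M split needs no conversion;
only the 2M-to-4k split does. In the 4k format, bit 12 is part of the page
frame number, which is why the PAT information must be moved down to bit 7
(the position that 2M/1G entries use for _PAGE_PSE). Purely as an
illustration, PAT index 5 (PAT=1, PCD=0, PWT=1) is encoded like this in the
two formats:

	/* large-page format: PAT at bit 12, PWT at bit 3 */
	pgprotval_t prot_2m = _PAGE_PAT_LARGE | _PAGE_PWT;
	/* equivalent 4k-format value, as pgprot_large_2_4k() produces it */
	pgprotval_t prot_4k = _PAGE_PAT | _PAGE_PWT;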
@@ -879,6 +885,7 @@ static int populate_pmd(struct cpa_data *cpa,
 {
 	unsigned int cur_pages = 0;
 	pmd_t *pmd;
+	pgprot_t pmd_pgprot;
 
 	/*
 	 * Not on a 2M boundary?
@@ -910,6 +917,8 @@ static int populate_pmd(struct cpa_data *cpa,
 	if (num_pages == cur_pages)
 		return cur_pages;
 
+	pmd_pgprot = pgprot_4k_2_large(pgprot);
+
 	while (end - start >= PMD_SIZE) {
 
 		/*
@@ -921,7 +930,8 @@ static int populate_pmd(struct cpa_data *cpa,
 
 		pmd = pmd_offset(pud, start);
 
-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+				   massage_pgprot(pmd_pgprot)));
 
 		start	  += PMD_SIZE;
 		cpa->pfn  += PMD_SIZE;
@@ -949,6 +959,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	pud_t *pud;
 	unsigned long end;
 	int cur_pages = 0;
+	pgprot_t pud_pgprot;
 
 	end = start + (cpa->numpages << PAGE_SHIFT);
 
@@ -986,12 +997,14 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 		return cur_pages;
 
 	pud = pud_offset(pgd, start);
+	pud_pgprot = pgprot_4k_2_large(pgprot);
 
 	/*
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
 	while (end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+				   massage_pgprot(pud_pgprot)));
 
 		start	  += PUD_SIZE;
 		cpa->pfn  += PUD_SIZE;
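
In populate_pmd() and populate_pud() the incoming pgprot is in 4k format (it
was built from cpa->mask_set and cpa->mask_clr). Because _PAGE_PAT in that
format occupies bit 7, the same bit that holds _PAGE_PSE in PMD/PUD entries,
OR-ing it in unconverted would silently drop the requested caching mode. The
converted value is loop-invariant, which is why it is computed once before
each while loop. An illustrative sketch (not part of the patch) of the
hazard being fixed:

	/* 4k-format prot requesting a PAT index with the PAT bit set */
	pgprot_t prot = __pgprot(_PAGE_PAT | _PAGE_PCD);

	/* pre-patch: _PAGE_PAT aliases _PAGE_PSE, so the PAT intent is lost */
	pmdval_t bad  = _PAGE_PSE | pgprot_val(prot);

	/* post-patch: PAT is relocated to bit 12 before the entry is built */
	pmdval_t good = _PAGE_PSE | pgprot_val(pgprot_4k_2_large(prot));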