@@ -657,32 +657,58 @@ static u64 __init get_jump_table_addr(void)
 	return ret;
 }
 
-static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
+static void pvalidate_pages(struct snp_psc_desc *desc)
 {
-	unsigned long vaddr_end;
+	struct psc_entry *e;
+	unsigned long vaddr;
+	unsigned int size;
+	unsigned int i;
+	bool validate;
 	int rc;
 
-	vaddr = vaddr & PAGE_MASK;
-	vaddr_end = vaddr + (npages << PAGE_SHIFT);
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
+		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
+
+		rc = pvalidate(vaddr, size, validate);
+		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
+			unsigned long vaddr_end = vaddr + PMD_SIZE;
+
+			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
+				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+				if (rc)
+					break;
+			}
+		}
 
-	while (vaddr < vaddr_end) {
-		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
 		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-
-		vaddr = vaddr + PAGE_SIZE;
 	}
 }
 
-static void early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
+static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+				  unsigned long npages, enum psc_op op)
 {
 	unsigned long paddr_end;
 	u64 val;
+	int ret;
+
+	vaddr = vaddr & PAGE_MASK;
 
 	paddr = paddr & PAGE_MASK;
 	paddr_end = paddr + (npages << PAGE_SHIFT);
 
 	while (paddr < paddr_end) {
+		if (op == SNP_PAGE_STATE_SHARED) {
+			/* Page validation must be rescinded before changing to shared */
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
+			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
+				goto e_term;
+		}
+
 		/*
 		 * Use the MSR protocol because this function can be called before
 		 * the GHCB is established.
@@ -703,7 +729,15 @@ static void early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
 			     paddr, GHCB_MSR_PSC_RESP_VAL(val)))
 			goto e_term;
 
-		paddr = paddr + PAGE_SIZE;
+		if (op == SNP_PAGE_STATE_PRIVATE) {
+			/* Page validation must be performed after changing to private */
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
+			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
+				goto e_term;
+		}
+
+		vaddr += PAGE_SIZE;
+		paddr += PAGE_SIZE;
 	}
 
 	return;
@@ -728,10 +762,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 	 * Ask the hypervisor to mark the memory pages as private in the RMP
 	 * table.
 	 */
-	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
-
-	/* Validate the memory pages after they've been added in the RMP table. */
-	pvalidate_pages(vaddr, npages, true);
+	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
@@ -746,11 +777,8 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
-	/* Invalidate the memory pages before they are marked shared in the RMP table. */
-	pvalidate_pages(vaddr, npages, false);
-
 	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
-	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
+	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
 }
 
 void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
@@ -834,10 +862,11 @@ static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
 	return ret;
 }
 
-static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
-			      unsigned long vaddr_end, int op)
+static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
+				       unsigned long vaddr_end, int op)
 {
 	struct ghcb_state state;
+	bool use_large_entry;
 	struct psc_hdr *hdr;
 	struct psc_entry *e;
 	unsigned long flags;
@@ -851,73 +880,81 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
 	memset(data, 0, sizeof(*data));
 	i = 0;
 
-	while (vaddr < vaddr_end) {
-		if (is_vmalloc_addr((void *)vaddr))
+	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
+		hdr->end_entry = i;
+
+		if (is_vmalloc_addr((void *)vaddr)) {
 			pfn = vmalloc_to_pfn((void *)vaddr);
-		else
+			use_large_entry = false;
+		} else {
 			pfn = __pa(vaddr) >> PAGE_SHIFT;
+			use_large_entry = true;
+		}
 
 		e->gfn = pfn;
 		e->operation = op;
-		hdr->end_entry = i;
 
-		/*
-		 * Current SNP implementation doesn't keep track of the RMP page
-		 * size so use 4K for simplicity.
-		 */
-		e->pagesize = RMP_PG_SIZE_4K;
+		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
+		    (vaddr_end - vaddr) >= PMD_SIZE) {
+			e->pagesize = RMP_PG_SIZE_2M;
+			vaddr += PMD_SIZE;
+		} else {
+			e->pagesize = RMP_PG_SIZE_4K;
+			vaddr += PAGE_SIZE;
+		}
 
-		vaddr = vaddr + PAGE_SIZE;
 		e++;
 		i++;
 	}
 
+	/* Page validation must be rescinded before changing to shared */
+	if (op == SNP_PAGE_STATE_SHARED)
+		pvalidate_pages(data);
+
 	local_irq_save(flags);
 
 	if (sev_cfg.ghcbs_initialized)
 		ghcb = __sev_get_ghcb(&state);
 	else
 		ghcb = boot_ghcb;
 
+	/* Invoke the hypervisor to perform the page state changes */
 	if (!ghcb || vmgexit_psc(ghcb, data))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 
 	if (sev_cfg.ghcbs_initialized)
 		__sev_put_ghcb(&state);
 
 	local_irq_restore(flags);
+
+	/* Page validation must be performed after changing to private */
+	if (op == SNP_PAGE_STATE_PRIVATE)
+		pvalidate_pages(data);
+
+	return vaddr;
 }
 
 static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
 {
-	unsigned long vaddr_end, next_vaddr;
 	struct snp_psc_desc desc;
+	unsigned long vaddr_end;
 
 	/* Use the MSR protocol when a GHCB is not available. */
 	if (!boot_ghcb)
-		return early_set_pages_state(__pa(vaddr), npages, op);
+		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
 
 	vaddr = vaddr & PAGE_MASK;
 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 
-	while (vaddr < vaddr_end) {
-		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
-		next_vaddr = min_t(unsigned long, vaddr_end,
-				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
-
-		__set_pages_state(&desc, vaddr, next_vaddr, op);
-
-		vaddr = next_vaddr;
-	}
+	while (vaddr < vaddr_end)
+		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
 }
 
 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
 {
 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 		return;
 
-	pvalidate_pages(vaddr, npages, false);
-
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
 }
 
@@ -927,8 +964,6 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
 		return;
 
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
-
-	pvalidate_pages(vaddr, npages, true);
 }
 
 static int snp_set_vmsa(void *va, bool vmsa)
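The payoff of the large-entry path is easiest to see by counting PSC entries. The following is a minimal standalone sketch, not kernel code: count_psc_entries() is a hypothetical helper that reimplements only the chunking test from the patched __set_pages_state() above, with PAGE_SIZE and PMD_SIZE redefined locally to mirror their x86 values.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE	0x1000UL	/* 4K, mirrors the x86 value */
#define PMD_SIZE	0x200000UL	/* 2M, mirrors the x86 value */

/* Count entries using the same 2M eligibility test as __set_pages_state() */
static unsigned long count_psc_entries(unsigned long vaddr, unsigned long vaddr_end,
				       bool use_large_entry)
{
	unsigned long i = 0;

	while (vaddr < vaddr_end) {
		/* A 2M entry needs a 2M-aligned vaddr and at least 2M remaining */
		if (use_large_entry && !(vaddr & (PMD_SIZE - 1)) &&
		    (vaddr_end - vaddr) >= PMD_SIZE)
			vaddr += PMD_SIZE;
		else
			vaddr += PAGE_SIZE;
		i++;
	}

	return i;
}

int main(void)
{
	unsigned long start = 0x200000UL;		/* 2M-aligned start */
	unsigned long end = start + (4UL << 20);	/* 4M range */

	printf("4K entries only:   %lu\n", count_psc_entries(start, end, false));	/* 1024 */
	printf("2M where possible: %lu\n", count_psc_entries(start, end, true));	/* 2 */
	return 0;
}

Since one struct snp_psc_desc holds a bounded number of entries (ARRAY_SIZE(data->entries), which the new while-loop condition respects), an aligned 4M conversion that previously spilled across several descriptors and VMGEXITs now fits in a single request with two 2M entries, and pvalidate_pages() falls back to per-4K PVALIDATE only when the RMP reports PVALIDATE_FAIL_SIZEMISMATCH for a 2M entry.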