
Commit 15d9088

tlendacky authored and bp3tk0v committed
x86/sev: Use large PSC requests if applicable
In advance of providing support for unaccepted memory, request 2M Page
State Change (PSC) requests when the address range allows for it. By using
a 2M page size, more PSC operations can be handled in a single request to
the hypervisor. The hypervisor will determine if it can accommodate the
larger request by checking the mapping in the nested page table. If mapped
as a large page, then the 2M page request can be performed; otherwise, the
2M page request will be broken down into 512 4K page requests. This is
still more efficient than having the guest perform multiple PSC requests
in order to process the 512 4K pages.

In conjunction with the 2M PSC requests, attempt to perform the associated
PVALIDATE instruction of the page using the 2M page size. If PVALIDATE
fails with a size mismatch, then fall back to validating 512 4K pages. To
do this, page validation is modified to work with the PSC structure and
not just a virtual address range.

Signed-off-by: Tom Lendacky <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/050d17b460dfc237b51d72082e5df4498d3513cb.1686063086.git.thomas.lendacky@amd.com
1 parent 7006b75 commit 15d9088
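
In short, the validation path now tries a single 2M PVALIDATE and drops to
4K granularity only on a size mismatch. Below is a minimal sketch of that
strategy, not the exact kernel code (the real loop is in the sev.c diff
further down); pvalidate() is the kernel's wrapper around the PVALIDATE
instruction, and the constants are the ones added in the sev.h diff:

/* Sketch: validate one 2M region, falling back to 512 4K pages. */
static int validate_2m_with_fallback(unsigned long vaddr, bool validate)
{
	int rc;

	/* Try the whole region with a single 2M PVALIDATE first. */
	rc = pvalidate(vaddr, RMP_PG_SIZE_2M, validate);

	/* The RMP entry is 4K-mapped: redo it as 512 4K operations. */
	if (rc == PVALIDATE_FAIL_SIZEMISMATCH) {
		unsigned long vaddr_end = vaddr + PMD_SIZE;

		for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
			rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
			if (rc)
				break;
		}
	}

	return rc;
}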

File tree

2 files changed (+84, -45)

arch/x86/include/asm/sev.h

Lines changed: 4 additions & 0 deletions
@@ -80,11 +80,15 @@ extern void vc_no_ghcb(void);
 extern void vc_boot_ghcb(void);
 extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 
+/* PVALIDATE return codes */
+#define PVALIDATE_FAIL_SIZEMISMATCH	6
+
 /* Software defined (when rFlags.CF = 1) */
 #define PVALIDATE_FAIL_NOUPDATE		255
 
 /* RMP page size */
 #define RMP_PG_SIZE_4K			0
+#define RMP_PG_SIZE_2M			1
 
 #define RMPADJUST_VMSA_PAGE_BIT		BIT(16)
 
arch/x86/kernel/sev.c

Lines changed: 80 additions & 45 deletions
@@ -657,32 +657,58 @@ static u64 __init get_jump_table_addr(void)
 	return ret;
 }
 
-static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
+static void pvalidate_pages(struct snp_psc_desc *desc)
 {
-	unsigned long vaddr_end;
+	struct psc_entry *e;
+	unsigned long vaddr;
+	unsigned int size;
+	unsigned int i;
+	bool validate;
 	int rc;
 
-	vaddr = vaddr & PAGE_MASK;
-	vaddr_end = vaddr + (npages << PAGE_SHIFT);
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
+		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
+		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
+
+		rc = pvalidate(vaddr, size, validate);
+		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
+			unsigned long vaddr_end = vaddr + PMD_SIZE;
+
+			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
+				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
+				if (rc)
+					break;
+			}
+		}
 
-	while (vaddr < vaddr_end) {
-		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
 		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-
-		vaddr = vaddr + PAGE_SIZE;
 	}
 }
 
-static void early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
+static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+				  unsigned long npages, enum psc_op op)
 {
 	unsigned long paddr_end;
 	u64 val;
+	int ret;
+
+	vaddr = vaddr & PAGE_MASK;
 
 	paddr = paddr & PAGE_MASK;
 	paddr_end = paddr + (npages << PAGE_SHIFT);
 
 	while (paddr < paddr_end) {
+		if (op == SNP_PAGE_STATE_SHARED) {
+			/* Page validation must be rescinded before changing to shared */
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
+			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
+				goto e_term;
+		}
+
 		/*
 		 * Use the MSR protocol because this function can be called before
 		 * the GHCB is established.
@@ -703,7 +729,15 @@ static void early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
 			     paddr, GHCB_MSR_PSC_RESP_VAL(val)))
 			goto e_term;
 
-		paddr = paddr + PAGE_SIZE;
+		if (op == SNP_PAGE_STATE_PRIVATE) {
+			/* Page validation must be performed after changing to private */
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
+			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
+				goto e_term;
+		}
+
+		vaddr += PAGE_SIZE;
+		paddr += PAGE_SIZE;
 	}
 
 	return;
@@ -728,10 +762,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 	 * Ask the hypervisor to mark the memory pages as private in the RMP
 	 * table.
 	 */
-	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
-
-	/* Validate the memory pages after they've been added in the RMP table. */
-	pvalidate_pages(vaddr, npages, true);
+	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
@@ -746,11 +777,8 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
-	/* Invalidate the memory pages before they are marked shared in the RMP table. */
-	pvalidate_pages(vaddr, npages, false);
-
 	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
-	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
+	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
 }
 
 void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
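
The early-boot changes above all enforce a single ordering rule: validation
must be rescinded before a page goes shared, and performed only after a page
has gone private. A condensed sketch of that rule for one 4K page follows;
psc_request_msr() is a hypothetical stand-in for the GHCB MSR exchange that
early_set_pages_state() actually performs:

/* Sketch of the PSC/PVALIDATE ordering for one 4K page. */
static int change_page_state(unsigned long vaddr, unsigned long paddr,
			     enum psc_op op)
{
	int ret;

	/* Rescind validation before the page becomes shared... */
	if (op == SNP_PAGE_STATE_SHARED) {
		ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
		if (ret)
			return ret;
	}

	/* ...ask the hypervisor to flip the RMP state... */
	ret = psc_request_msr(paddr, op);	/* hypothetical helper */
	if (ret)
		return ret;

	/* ...and validate only once the page has become private. */
	if (op == SNP_PAGE_STATE_PRIVATE)
		ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);

	return ret;
}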
@@ -834,10 +862,11 @@ static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
 	return ret;
 }
 
-static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
-			      unsigned long vaddr_end, int op)
+static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
+				       unsigned long vaddr_end, int op)
 {
 	struct ghcb_state state;
+	bool use_large_entry;
 	struct psc_hdr *hdr;
 	struct psc_entry *e;
 	unsigned long flags;
@@ -851,73 +880,81 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
 	memset(data, 0, sizeof(*data));
 	i = 0;
 
-	while (vaddr < vaddr_end) {
-		if (is_vmalloc_addr((void *)vaddr))
+	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
+		hdr->end_entry = i;
+
+		if (is_vmalloc_addr((void *)vaddr)) {
 			pfn = vmalloc_to_pfn((void *)vaddr);
-		else
+			use_large_entry = false;
+		} else {
 			pfn = __pa(vaddr) >> PAGE_SHIFT;
+			use_large_entry = true;
+		}
 
 		e->gfn = pfn;
 		e->operation = op;
-		hdr->end_entry = i;
 
-		/*
-		 * Current SNP implementation doesn't keep track of the RMP page
-		 * size so use 4K for simplicity.
-		 */
-		e->pagesize = RMP_PG_SIZE_4K;
+		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
+		    (vaddr_end - vaddr) >= PMD_SIZE) {
+			e->pagesize = RMP_PG_SIZE_2M;
+			vaddr += PMD_SIZE;
+		} else {
+			e->pagesize = RMP_PG_SIZE_4K;
+			vaddr += PAGE_SIZE;
+		}
 
-		vaddr = vaddr + PAGE_SIZE;
 		e++;
 		i++;
 	}
 
+	/* Page validation must be rescinded before changing to shared */
+	if (op == SNP_PAGE_STATE_SHARED)
+		pvalidate_pages(data);
+
 	local_irq_save(flags);
 
 	if (sev_cfg.ghcbs_initialized)
 		ghcb = __sev_get_ghcb(&state);
 	else
 		ghcb = boot_ghcb;
 
+	/* Invoke the hypervisor to perform the page state changes */
 	if (!ghcb || vmgexit_psc(ghcb, data))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 
 	if (sev_cfg.ghcbs_initialized)
 		__sev_put_ghcb(&state);
 
 	local_irq_restore(flags);
+
+	/* Page validation must be performed after changing to private */
+	if (op == SNP_PAGE_STATE_PRIVATE)
+		pvalidate_pages(data);
+
+	return vaddr;
 }
 
 static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
 {
-	unsigned long vaddr_end, next_vaddr;
 	struct snp_psc_desc desc;
+	unsigned long vaddr_end;
 
 	/* Use the MSR protocol when a GHCB is not available. */
 	if (!boot_ghcb)
-		return early_set_pages_state(__pa(vaddr), npages, op);
+		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
 
 	vaddr = vaddr & PAGE_MASK;
 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 
-	while (vaddr < vaddr_end) {
-		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
-		next_vaddr = min_t(unsigned long, vaddr_end,
-				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
-
-		__set_pages_state(&desc, vaddr, next_vaddr, op);
-
-		vaddr = next_vaddr;
-	}
+	while (vaddr < vaddr_end)
+		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
 }
 
 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
 {
 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 		return;
 
-	pvalidate_pages(vaddr, npages, false);
-
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
 }
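
To see what the new entry packing buys, consider a worked example: a 5M
range that starts 1M past a 2M boundary packs as 256 4K entries followed by
two 2M entries, i.e. 258 descriptor entries instead of 1280. Below is a
standalone counting helper under those packing rules (illustrative only;
the SZ_* constants stand in for the kernel's PAGE_SIZE and PMD_SIZE):

#include <stdbool.h>

#define SZ_4K	0x1000UL	/* stands in for PAGE_SIZE */
#define SZ_2M	0x200000UL	/* stands in for PMD_SIZE */

/* Count descriptor entries needed for [vaddr, vaddr_end). */
static unsigned long count_psc_entries(unsigned long vaddr,
				       unsigned long vaddr_end,
				       bool use_large_entry)
{
	unsigned long n = 0;

	while (vaddr < vaddr_end) {
		if (use_large_entry && !(vaddr & (SZ_2M - 1)) &&
		    (vaddr_end - vaddr) >= SZ_2M)
			vaddr += SZ_2M;		/* one 2M entry */
		else
			vaddr += SZ_4K;		/* one 4K entry */
		n++;
	}

	return n;	/* e.g. 258 for a 5M range starting at 2M + 1M */
}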

@@ -927,8 +964,6 @@ void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
 		return;
 
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
-
-	pvalidate_pages(vaddr, npages, true);
 }
 
 static int snp_set_vmsa(void *va, bool vmsa)
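
With validation folded into the state-change paths, external callers simply
flip page states and never issue PVALIDATE themselves. A hypothetical caller
sketch using the exported helpers touched above:

/* Hypothetical caller: share a page-aligned buffer, then reclaim it. */
static void example_share_buffer(void *buf, unsigned long npages)
{
	/* Rescinds validation, then flips the pages to shared. */
	snp_set_memory_shared((unsigned long)buf, npages);

	/* ... the hypervisor may now access the buffer ... */

	/* Flips the pages back to private, then re-validates them. */
	snp_set_memory_private((unsigned long)buf, npages);
}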
