Skip to content

Commit fcd042e

Browse files
tlendacky and bp3tk0v
authored and committed
x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0
The PVALIDATE instruction can only be performed at VMPL0. If an SVSM is present, it will be running at VMPL0 while the guest itself is then running at VMPL1 or a lower privilege level. In that case, use the SVSM_CORE_PVALIDATE call to perform memory validation instead of issuing the PVALIDATE instruction directly. The validation of a single 4K page is now explicitly identified as such in the function name, pvalidate_4k_page(). The pvalidate_pages() function is used for validating 1 or more pages at either 4K or 2M in size. Each function, however, determines whether it can issue the PVALIDATE directly or whether the SVSM needs to be invoked. [ bp: Touchups. ] [ Tom: fold in a fix for Coconut SVSM: https://lore.kernel.org/r/[email protected] ] Signed-off-by: Tom Lendacky <[email protected]> Signed-off-by: Borislav Petkov (AMD) <[email protected]> Link: https://lore.kernel.org/r/4c4017d8b94512d565de9ccb555b1a9f8983c69c.1717600736.git.thomas.lendacky@amd.com
1 parent 34ff659 commit fcd042e

File tree

4 files changed

+328
-24
lines changed

4 files changed

+328
-24
lines changed

arch/x86/boot/compressed/sev.c

Lines changed: 42 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,34 @@ static bool fault_in_kernel_space(unsigned long address)
129129
/* Include code for early handlers */
130130
#include "../../kernel/sev-shared.c"
131131

132+
static struct svsm_ca *svsm_get_caa(void)
133+
{
134+
return boot_svsm_caa;
135+
}
136+
137+
static u64 svsm_get_caa_pa(void)
138+
{
139+
return boot_svsm_caa_pa;
140+
}
141+
142+
static int svsm_perform_call_protocol(struct svsm_call *call)
143+
{
144+
struct ghcb *ghcb;
145+
int ret;
146+
147+
if (boot_ghcb)
148+
ghcb = boot_ghcb;
149+
else
150+
ghcb = NULL;
151+
152+
do {
153+
ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
154+
: svsm_perform_msr_protocol(call);
155+
} while (ret == -EAGAIN);
156+
157+
return ret;
158+
}
159+
132160
bool sev_snp_enabled(void)
133161
{
134162
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
@@ -145,8 +173,8 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
145173
* If private -> shared then invalidate the page before requesting the
146174
* state change in the RMP table.
147175
*/
148-
if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
149-
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
176+
if (op == SNP_PAGE_STATE_SHARED)
177+
pvalidate_4k_page(paddr, paddr, false);
150178

151179
/* Issue VMGEXIT to change the page state in RMP table. */
152180
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
@@ -161,8 +189,8 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
161189
* Now that page state is changed in the RMP table, validate it so that it is
162190
* consistent with the RMP entry.
163191
*/
164-
if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
165-
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
192+
if (op == SNP_PAGE_STATE_PRIVATE)
193+
pvalidate_4k_page(paddr, paddr, true);
166194
}
167195

168196
void snp_set_page_private(unsigned long paddr)
@@ -255,6 +283,16 @@ void sev_es_shutdown_ghcb(void)
255283
if (!sev_es_check_cpu_features())
256284
error("SEV-ES CPU Features missing.");
257285

286+
/*
287+
* This denotes whether to use the GHCB MSR protocol or the GHCB
288+
* shared page to perform a GHCB request. Since the GHCB page is
289+
* being changed to encrypted, it can't be used to perform GHCB
290+
* requests. Clear the boot_ghcb variable so that the GHCB MSR
291+
* protocol is used to change the GHCB page over to an encrypted
292+
* page.
293+
*/
294+
boot_ghcb = NULL;
295+
258296
/*
259297
* GHCB Page must be flushed from the cache and mapped encrypted again.
260298
* Otherwise the running kernel will see strange cache effects when

arch/x86/include/asm/sev.h

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -187,6 +187,31 @@ struct svsm_ca {
187187
#define SVSM_ERR_INVALID_PARAMETER 0x80000005
188188
#define SVSM_ERR_INVALID_REQUEST 0x80000006
189189
#define SVSM_ERR_BUSY 0x80000007
190+
#define SVSM_PVALIDATE_FAIL_SIZEMISMATCH 0x80001006
191+
192+
/*
193+
* The SVSM PVALIDATE related structures
194+
*/
195+
struct svsm_pvalidate_entry {
196+
u64 page_size : 2,
197+
action : 1,
198+
ignore_cf : 1,
199+
rsvd : 8,
200+
pfn : 52;
201+
};
202+
203+
struct svsm_pvalidate_call {
204+
u16 num_entries;
205+
u16 cur_index;
206+
207+
u8 rsvd1[4];
208+
209+
struct svsm_pvalidate_entry entry[];
210+
};
211+
212+
#define SVSM_PVALIDATE_MAX_COUNT ((sizeof_field(struct svsm_ca, svsm_buffer) - \
213+
offsetof(struct svsm_pvalidate_call, entry)) / \
214+
sizeof(struct svsm_pvalidate_entry))
190215

191216
/*
192217
* SVSM protocol structure
@@ -207,6 +232,7 @@ struct svsm_call {
207232

208233
#define SVSM_CORE_CALL(x) ((0ULL << 32) | (x))
209234
#define SVSM_CORE_REMAP_CA 0
/* SVSM core-protocol call ID: validate memory on behalf of the guest */
#define SVSM_CORE_PVALIDATE		1
210236

211237
#ifdef CONFIG_AMD_MEM_ENCRYPT
212238
extern void __sev_es_ist_enter(struct pt_regs *regs);

0 commit comments

Comments
 (0)