
Commit 34ff659

tlendacky authored and bp3tk0v committed
x86/sev: Use kernel provided SVSM Calling Areas
The SVSM Calling Area (CA) is used to communicate between Linux and the SVSM. Since the firmware-supplied CA for the BSP is likely to be in reserved memory, switch away from that CA to a kernel-provided CA so that the CA remains accessible and usable during boot. The CA switch is done using the SVSM core protocol SVSM_CORE_REMAP_CA call.

An SVSM call is executed by filling out the SVSM CA and setting the proper register state as documented by the SVSM protocol. The SVSM is invoked by requesting the hypervisor to run VMPL0.

Once it is safe to allocate/reserve memory, allocate a CA for each CPU. After allocating the new CAs, the BSP switches from the boot CA to its per-CPU CA. The CA for an AP is identified to the SVSM when creating the VMSA in preparation for booting the AP.

  [ bp: Heavily simplify svsm_issue_call() asm, other touchups. ]

Signed-off-by: Tom Lendacky <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/fa8021130bcc3bcf14d722a25548cb0cdf325456.1717600736.git.thomas.lendacky@amd.com
1 parent 878e70d commit 34ff659

File tree

6 files changed: +362 -39 lines changed


arch/x86/include/asm/sev-common.h

Lines changed: 13 additions & 0 deletions
@@ -98,6 +98,19 @@ enum psc_op {
 	/* GHCBData[63:32] */				\
 	(((u64)(val) & GENMASK_ULL(63, 32)) >> 32)
 
+/* GHCB Run at VMPL Request/Response */
+#define GHCB_MSR_VMPL_REQ		0x016
+#define GHCB_MSR_VMPL_REQ_LEVEL(v)			\
+	/* GHCBData[39:32] */				\
+	(((u64)(v) & GENMASK_ULL(7, 0) << 32) |	\
+	/* GHCBDdata[11:0] */				\
+	GHCB_MSR_VMPL_REQ)
+
+#define GHCB_MSR_VMPL_RESP		0x017
+#define GHCB_MSR_VMPL_RESP_VAL(v)			\
+	/* GHCBData[63:32] */				\
+	(((u64)(v) & GENMASK_ULL(63, 32)) >> 32)
+
 /* GHCB Hypervisor Feature Request/Response */
 #define GHCB_MSR_HV_FT_REQ		0x080
 #define GHCB_MSR_HV_FT_RESP		0x081
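As a worked example (not part of the diff): a request for the hypervisor to run VMPL0 encodes to GHCB_MSR_VMPL_REQ_LEVEL(0) == 0x016, i.e. only the request code in GHCBData[11:0]. A successful response carries GHCB_MSR_VMPL_RESP (0x017) in the low 12 bits and a zero error code in GHCBData[63:32], which is exactly what svsm_perform_msr_protocol() in the sev-shared.c hunk below checks with GHCB_RESP_CODE() and GHCB_MSR_VMPL_RESP_VAL().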

arch/x86/include/asm/sev.h

Lines changed: 32 additions & 0 deletions
@@ -178,6 +178,36 @@ struct svsm_ca {
 	u8 svsm_buffer[PAGE_SIZE - 8];
 };
 
+#define SVSM_SUCCESS				0
+#define SVSM_ERR_INCOMPLETE			0x80000000
+#define SVSM_ERR_UNSUPPORTED_PROTOCOL		0x80000001
+#define SVSM_ERR_UNSUPPORTED_CALL		0x80000002
+#define SVSM_ERR_INVALID_ADDRESS		0x80000003
+#define SVSM_ERR_INVALID_FORMAT			0x80000004
+#define SVSM_ERR_INVALID_PARAMETER		0x80000005
+#define SVSM_ERR_INVALID_REQUEST		0x80000006
+#define SVSM_ERR_BUSY				0x80000007
+
+/*
+ * SVSM protocol structure
+ */
+struct svsm_call {
+	struct svsm_ca *caa;
+	u64 rax;
+	u64 rcx;
+	u64 rdx;
+	u64 r8;
+	u64 r9;
+	u64 rax_out;
+	u64 rcx_out;
+	u64 rdx_out;
+	u64 r8_out;
+	u64 r9_out;
+};
+
+#define SVSM_CORE_CALL(x)		((0ULL << 32) | (x))
+#define SVSM_CORE_REMAP_CA		0
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern void __sev_es_ist_enter(struct pt_regs *regs);
 extern void __sev_es_ist_exit(void);

@@ -260,6 +290,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
 void sev_show_status(void);
+void snp_update_svsm_ca(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }

@@ -289,6 +320,7 @@ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
 static inline void sev_show_status(void) { }
+static inline void snp_update_svsm_ca(void) { }
 #endif
 
 #ifdef CONFIG_KVM_AMD_SEV
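For illustration only (the matching arch/x86/kernel/sev.c changes are not part of this excerpt), a caller might fill in a struct svsm_call to ask the SVSM to switch to a new kernel-provided CA. The helper below is a hypothetical sketch: it assumes the new CA page has already been allocated and that, per the SVSM core protocol, the Remap CA call takes the GPA of the new CA in RCX.

/* Hypothetical sketch -- not part of this commit. */
static int example_remap_ca(struct svsm_ca *current_ca, u64 new_ca_gpa)
{
	struct svsm_call call = {};

	call.caa = current_ca;				/* CA used to issue this call */
	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);	/* core protocol (0), call 0 */
	call.rcx = new_ca_gpa;				/* GPA of the new, kernel-provided CA */

	/* Very early in boot only the GHCB MSR protocol is available. */
	return svsm_perform_msr_protocol(&call);
}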

arch/x86/include/uapi/asm/svm.h

Lines changed: 1 addition & 0 deletions
@@ -115,6 +115,7 @@
 #define SVM_VMGEXIT_AP_CREATE_ON_INIT		0
 #define SVM_VMGEXIT_AP_CREATE			1
 #define SVM_VMGEXIT_AP_DESTROY			2
+#define SVM_VMGEXIT_SNP_RUN_VMPL		0x80000018
 #define SVM_VMGEXIT_HV_FEATURES			0x8000fffd
 #define SVM_VMGEXIT_TERM_REQUEST		0x8000fffe
 #define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code)	\
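The new exit code is consumed by svsm_perform_ghcb_protocol() added in sev-shared.c below, which programs it into the GHCB before issuing the VMGEXIT:

	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);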

arch/x86/kernel/sev-shared.c

Lines changed: 126 additions & 2 deletions
@@ -21,6 +21,8 @@
 #define WARN(condition, format...) (!!(condition))
 #define sev_printk(fmt, ...)
 #define sev_printk_rtl(fmt, ...)
+#undef vc_forward_exception
+#define vc_forward_exception(c)	panic("SNP: Hypervisor requested exception\n")
 #endif
 
 /*
@@ -244,6 +246,126 @@ static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt
 	return ES_VMM_ERROR;
 }
 
+static inline int svsm_process_result_codes(struct svsm_call *call)
+{
+	switch (call->rax_out) {
+	case SVSM_SUCCESS:
+		return 0;
+	case SVSM_ERR_INCOMPLETE:
+	case SVSM_ERR_BUSY:
+		return -EAGAIN;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Issue a VMGEXIT to call the SVSM:
+ *   - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
+ *   - Set the CA call pending field to 1
+ *   - Issue VMGEXIT
+ *   - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
+ *   - Perform atomic exchange of the CA call pending field
+ *
+ *   - See the "Secure VM Service Module for SEV-SNP Guests" specification for
+ *     details on the calling convention.
+ *     - The calling convention loosely follows the Microsoft X64 calling
+ *       convention by putting arguments in RCX, RDX, R8 and R9.
+ *     - RAX specifies the SVSM protocol/callid as input and the return code
+ *       as output.
+ */
+static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
+{
+	register unsigned long rax asm("rax") = call->rax;
+	register unsigned long rcx asm("rcx") = call->rcx;
+	register unsigned long rdx asm("rdx") = call->rdx;
+	register unsigned long r8 asm("r8") = call->r8;
+	register unsigned long r9 asm("r9") = call->r9;
+
+	call->caa->call_pending = 1;
+
+	asm volatile("rep; vmmcall\n\t"
+		     : "+r" (rax), "+r" (rcx), "+r" (rdx), "+r" (r8), "+r" (r9)
+		     : : "memory");
+
+	*pending = xchg(&call->caa->call_pending, *pending);
+
+	call->rax_out = rax;
+	call->rcx_out = rcx;
+	call->rdx_out = rdx;
+	call->r8_out = r8;
+	call->r9_out = r9;
+}
+
+static int svsm_perform_msr_protocol(struct svsm_call *call)
+{
+	u8 pending = 0;
+	u64 val, resp;
+
+	/*
+	 * When using the MSR protocol, be sure to save and restore
+	 * the current MSR value.
+	 */
+	val = sev_es_rd_ghcb_msr();
+
+	sev_es_wr_ghcb_msr(GHCB_MSR_VMPL_REQ_LEVEL(0));
+
+	svsm_issue_call(call, &pending);
+
+	resp = sev_es_rd_ghcb_msr();
+
+	sev_es_wr_ghcb_msr(val);
+
+	if (pending)
+		return -EINVAL;
+
+	if (GHCB_RESP_CODE(resp) != GHCB_MSR_VMPL_RESP)
+		return -EINVAL;
+
+	if (GHCB_MSR_VMPL_RESP_VAL(resp))
+		return -EINVAL;
+
+	return svsm_process_result_codes(call);
+}
+
+static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
+{
+	struct es_em_ctxt ctxt;
+	u8 pending = 0;
+
+	vc_ghcb_invalidate(ghcb);
+
+	/*
+	 * Fill in protocol and format specifiers. This can be called very early
+	 * in the boot, so use rip-relative references as needed.
+	 */
+	ghcb->protocol_version = RIP_REL_REF(ghcb_version);
+	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;
+
+	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
+	ghcb_set_sw_exit_info_1(ghcb, 0);
+	ghcb_set_sw_exit_info_2(ghcb, 0);
+
+	sev_es_wr_ghcb_msr(__pa(ghcb));
+
+	svsm_issue_call(call, &pending);
+
+	if (pending)
+		return -EINVAL;
+
+	switch (verify_exception_info(ghcb, &ctxt)) {
+	case ES_OK:
+		break;
+	case ES_EXCEPTION:
+		vc_forward_exception(&ctxt);
+		fallthrough;
+	default:
+		return -EINVAL;
+	}
+
+	return svsm_process_result_codes(call);
+}
+
 static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
 					  struct es_em_ctxt *ctxt,
 					  u64 exit_code, u64 exit_info_1,
@@ -1289,7 +1411,7 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
  * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
  * services needed when not running in VMPL0.
  */
-static void __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
+static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
 {
 	struct snp_secrets_page *secrets_page;
 	u64 caa;

@@ -1311,7 +1433,7 @@ static void __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
 	 * code and the early kernel code.
 	 */
 	if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1))
-		return;
+		return false;
 
 	/*
 	 * Not running at VMPL0, ensure everything has been properly supplied

@@ -1344,4 +1466,6 @@ static void __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
 	 */
 	RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa;
 	RIP_REL_REF(boot_svsm_caa_pa) = caa;
+
+	return true;
 }
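Both protocol helpers map SVSM_ERR_INCOMPLETE and SVSM_ERR_BUSY to -EAGAIN via svsm_process_result_codes(), so callers are expected to retry. A minimal caller sketch (hypothetical wrapper, not part of this commit), assuming a NULL GHCB pointer means only the MSR protocol is usable:

/* Hypothetical sketch -- not part of this commit. */
static int example_svsm_call(struct ghcb *ghcb, struct svsm_call *call)
{
	int ret;

	do {
		/* Prefer the GHCB protocol once a GHCB is mapped, else fall back to the MSR protocol. */
		ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
			   : svsm_perform_msr_protocol(call);
	} while (ret == -EAGAIN);

	return ret;
}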
