This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit a67f6b6

dcui authored and liuw committed
x86/hyperv: Move the code in ivm.c around to avoid unnecessary ifdef's
Group the code this way so that we can avoid too many ifdef's:

  Data only used in an SNP VM with the paravisor;
  Functions only used in an SNP VM with the paravisor;

  Data only used in an SNP VM without the paravisor;
  Functions only used in an SNP VM without the paravisor;

  Functions only used in a TDX VM, with and without the paravisor;

  Functions used in an SNP or TDX VM, when the paravisor is present;

  Functions always used, even in a regular non-CoCo VM.

No functional change.

Signed-off-by: Dexuan Cui <[email protected]>
Reviewed-by: Michael Kelley <[email protected]>
Reviewed-by: Tianyu Lan <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent e3131f1 commit a67f6b6
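For orientation, the sketch below illustrates the section layout the commit message describes for the reorganized ivm.c. It is only an outline assembled from names visible in this diff, not the literal file contents:

/* Illustrative outline of the reorganized arch/x86/hyperv/ivm.c (sketch only). */

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Data/functions only used in an SNP VM with the paravisor:
 * hv_ghcb_version, hv_ghcb_hypercall(), hv_ghcb_msr_read()/hv_ghcb_msr_write(), ...
 *
 * Data/functions only used in an SNP VM without the paravisor:
 * ap_start_input_arg, ap_start_stack, hv_sev_vmsa,
 * snp_set_vmsa(), snp_cleanup_vmsa(), hv_snp_boot_ap()
 */
#else
/* Empty inline stubs, e.g. hv_ghcb_msr_read()/hv_ghcb_msr_write(). */
#endif

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * Functions only used in a TDX VM, with and without the paravisor:
 * hv_tdx_msr_read()/hv_tdx_msr_write(), hv_tdx_hypercall()
 */
#else
/* Empty inline stubs, e.g. hv_tdx_msr_read()/hv_tdx_msr_write(). */
#endif

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
/*
 * Functions used in an SNP or TDX VM, when the paravisor is present:
 * hv_ivm_msr_read(), hv_mark_gpa_visibility(), hv_vtom_init(), ...
 */
#endif

/*
 * Functions always used, even in a regular non-CoCo VM:
 * hv_isolation_type_tdx(), ...
 */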

File tree

arch/x86/hyperv/ivm.c

1 file changed: +150, -159 lines


arch/x86/hyperv/ivm.c

Lines changed: 150 additions & 159 deletions
@@ -30,9 +30,6 @@
 
 #define GHCB_USAGE_HYPERV_CALL 1
 
-static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
-static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
-
 union hv_ghcb {
 	struct ghcb ghcb;
 	struct {
@@ -66,10 +63,10 @@ union hv_ghcb {
 	} hypercall;
 } __packed __aligned(HV_HYP_PAGE_SIZE);
 
-static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
-
+/* Only used in an SNP VM with the paravisor */
 static u16 hv_ghcb_version __ro_after_init;
 
+/* Functions only used in an SNP VM with the paravisor go here. */
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 {
 	union hv_ghcb *hv_ghcb;
@@ -247,6 +244,140 @@ static void hv_ghcb_msr_read(u64 msr, u64 *value)
 	local_irq_restore(flags);
 }
 
+/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
+static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
+static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
+
+/* Functions only used in an SNP VM without the paravisor go here. */
+
+#define hv_populate_vmcb_seg(seg, gdtr_base) \
+do { \
+	if (seg.selector) { \
+		seg.base = 0; \
+		seg.limit = HV_AP_SEGMENT_LIMIT; \
+		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
+		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
+	} \
+} while (0) \
+
+static int snp_set_vmsa(void *va, bool vmsa)
+{
+	u64 attrs;
+
+	/*
+	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
+	 * using the RMPADJUST instruction. However, for the instruction to
+	 * succeed it must target the permissions of a lesser privileged
+	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
+	 * instruction in the AMD64 APM Volume 3).
+	 */
+	attrs = 1;
+	if (vmsa)
+		attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+}
+
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+{
+	int err;
+
+	err = snp_set_vmsa(vmsa, false);
+	if (err)
+		pr_err("clear VMSA page failed (%u), leaking page\n", err);
+	else
+		free_page((unsigned long)vmsa);
+}
+
+int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+{
+	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	struct sev_es_save_area *cur_vmsa;
+	struct desc_ptr gdtr;
+	u64 ret, retry = 5;
+	struct hv_enable_vp_vtl *start_vp_input;
+	unsigned long flags;
+
+	if (!vmsa)
+		return -ENOMEM;
+
+	native_store_gdt(&gdtr);
+
+	vmsa->gdtr.base = gdtr.address;
+	vmsa->gdtr.limit = gdtr.size;
+
+	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
+
+	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
+
+	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
+
+	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
+
+	vmsa->efer = native_read_msr(MSR_EFER);
+
+	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
+	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
+	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+
+	vmsa->xcr0 = 1;
+	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
+	vmsa->rip = (u64)secondary_startup_64_no_verify;
+	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
+
+	/*
+	 * Set the SNP-specific fields for this VMSA:
+	 *   VMPL level
+	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
+	 */
+	vmsa->vmpl = 0;
+	vmsa->sev_features = sev_status >> 2;
+
+	ret = snp_set_vmsa(vmsa, true);
+	if (!ret) {
+		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+		free_page((u64)vmsa);
+		return ret;
+	}
+
+	local_irq_save(flags);
+	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+	memset(start_vp_input, 0, sizeof(*start_vp_input));
+	start_vp_input->partition_id = -1;
+	start_vp_input->vp_index = cpu;
+	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
+	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
+	do {
+		ret = hv_do_hypercall(HVCALL_START_VP,
+				      start_vp_input, NULL);
+	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
+
+	local_irq_restore(flags);
+
+	if (!hv_result_success(ret)) {
+		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
+		snp_cleanup_vmsa(vmsa);
+		vmsa = NULL;
+	}
+
+	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
+	/* Free up any previous VMSA page */
+	if (cur_vmsa)
+		snp_cleanup_vmsa(cur_vmsa);
+
+	/* Record the current VMSA page */
+	per_cpu(hv_sev_vmsa, cpu) = vmsa;
+
+	return ret;
+}
+
 #else
 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
 static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
@@ -282,6 +413,20 @@ static void hv_tdx_msr_read(u64 msr, u64 *val)
 	else
 		*val = args.r11;
 }
+
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+	struct tdx_hypercall_args args = { };
+
+	args.r10 = control;
+	args.rdx = param1;
+	args.r8 = param2;
+
+	(void)__tdx_hypercall_ret(&args);
+
+	return args.r11;
+}
+
 #else
 static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
 static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
@@ -309,9 +454,7 @@ void hv_ivm_msr_read(u64 msr, u64 *value)
 	else if (hv_isolation_type_snp())
 		hv_ghcb_msr_read(msr, value);
 }
-#endif
 
-#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
 /*
  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
  *
@@ -432,141 +575,6 @@ static bool hv_is_private_mmio(u64 addr)
 	return false;
 }
 
-#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
-
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-
-#define hv_populate_vmcb_seg(seg, gdtr_base) \
-do { \
-	if (seg.selector) { \
-		seg.base = 0; \
-		seg.limit = HV_AP_SEGMENT_LIMIT; \
-		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
-		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
-	} \
-} while (0) \
-
-static int snp_set_vmsa(void *va, bool vmsa)
-{
-	u64 attrs;
-
-	/*
-	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
-	 * using the RMPADJUST instruction. However, for the instruction to
-	 * succeed it must target the permissions of a lesser privileged
-	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
-	 * instruction in the AMD64 APM Volume 3).
-	 */
-	attrs = 1;
-	if (vmsa)
-		attrs |= RMPADJUST_VMSA_PAGE_BIT;
-
-	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
-}
-
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
-{
-	int err;
-
-	err = snp_set_vmsa(vmsa, false);
-	if (err)
-		pr_err("clear VMSA page failed (%u), leaking page\n", err);
-	else
-		free_page((unsigned long)vmsa);
-}
-
-int hv_snp_boot_ap(int cpu, unsigned long start_ip)
-{
-	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
-		__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	struct sev_es_save_area *cur_vmsa;
-	struct desc_ptr gdtr;
-	u64 ret, retry = 5;
-	struct hv_enable_vp_vtl *start_vp_input;
-	unsigned long flags;
-
-	if (!vmsa)
-		return -ENOMEM;
-
-	native_store_gdt(&gdtr);
-
-	vmsa->gdtr.base = gdtr.address;
-	vmsa->gdtr.limit = gdtr.size;
-
-	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
-	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
-
-	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
-	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
-
-	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
-	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
-
-	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
-	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
-
-	vmsa->efer = native_read_msr(MSR_EFER);
-
-	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
-	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
-	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
-
-	vmsa->xcr0 = 1;
-	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
-	vmsa->rip = (u64)secondary_startup_64_no_verify;
-	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
-
-	/*
-	 * Set the SNP-specific fields for this VMSA:
-	 *   VMPL level
-	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
-	 */
-	vmsa->vmpl = 0;
-	vmsa->sev_features = sev_status >> 2;
-
-	ret = snp_set_vmsa(vmsa, true);
-	if (!ret) {
-		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
-		free_page((u64)vmsa);
-		return ret;
-	}
-
-	local_irq_save(flags);
-	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
-	memset(start_vp_input, 0, sizeof(*start_vp_input));
-	start_vp_input->partition_id = -1;
-	start_vp_input->vp_index = cpu;
-	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
-	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
-
-	do {
-		ret = hv_do_hypercall(HVCALL_START_VP,
-				      start_vp_input, NULL);
-	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
-
-	local_irq_restore(flags);
-
-	if (!hv_result_success(ret)) {
-		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
-		snp_cleanup_vmsa(vmsa);
-		vmsa = NULL;
-	}
-
-	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
-	/* Free up any previous VMSA page */
-	if (cur_vmsa)
-		snp_cleanup_vmsa(cur_vmsa);
-
-	/* Record the current VMSA page */
-	per_cpu(hv_sev_vmsa, cpu) = vmsa;
-
-	return ret;
-}
-
-#endif /* CONFIG_AMD_MEM_ENCRYPT */
-
-#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
-
 void __init hv_vtom_init(void)
 {
 	enum hv_isolation_type type = hv_get_isolation_type();
@@ -654,20 +662,3 @@ bool hv_isolation_type_tdx(void)
 {
 	return static_branch_unlikely(&isolation_type_tdx);
 }
-
-#ifdef CONFIG_INTEL_TDX_GUEST
-
-u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
-{
-	struct tdx_hypercall_args args = { };
-
-	args.r10 = control;
-	args.rdx = param1;
-	args.r8 = param2;
-
-	(void)__tdx_hypercall_ret(&args);
-
-	return args.r11;
-}
-
-#endif
