@@ -30,9 +30,6 @@
 
 #define GHCB_USAGE_HYPERV_CALL	1
 
-static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
-static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
-
 union hv_ghcb {
 	struct ghcb ghcb;
 	struct {
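[Note: the two statics removed here are not dropped. They reappear verbatim further down in this diff, next to the AP-boot code that actually uses them, so that each #ifdef region of the file becomes self-contained.]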
@@ -66,10 +63,10 @@ union hv_ghcb {
 	} hypercall;
 } __packed __aligned(HV_HYP_PAGE_SIZE);
 
-static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
-
+/* Only used in an SNP VM with the paravisor */
 static u16 hv_ghcb_version __ro_after_init;
 
+/* Functions only used in an SNP VM with the paravisor go here. */
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
 {
 	union hv_ghcb *hv_ghcb;
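[Note: the added comments draw the file's new organizing line: code for an SNP VM running under a paravisor (a privileged layer that mediates GHCB traffic on the guest's behalf) versus code for a fully enlightened SNP VM, which must drive the GHCB and boot its own APs. That description of the paravisor is background from the surrounding series, not part of this patch.]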
@@ -247,6 +244,140 @@ static void hv_ghcb_msr_read(u64 msr, u64 *value)
 	local_irq_restore(flags);
 }
 
+/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
+static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
+static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
+
+/* Functions only used in an SNP VM without the paravisor go here. */
+
+#define hv_populate_vmcb_seg(seg, gdtr_base)			\
+do {								\
+	if (seg.selector) {					\
+		seg.base = 0;					\
+		seg.limit = HV_AP_SEGMENT_LIMIT;		\
+		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
+		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
+	}							\
+} while (0)							\
+
+static int snp_set_vmsa(void *va, bool vmsa)
+{
+	u64 attrs;
+
+	/*
+	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
+	 * using the RMPADJUST instruction. However, for the instruction to
+	 * succeed it must target the permissions of a lesser privileged
+	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
+	 * instruction in the AMD64 APM Volume 3).
+	 */
+	attrs = 1;
+	if (vmsa)
+		attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+}
+
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+{
+	int err;
+
+	err = snp_set_vmsa(vmsa, false);
+	if (err)
+		pr_err("clear VMSA page failed (%u), leaking page\n", err);
+	else
+		free_page((unsigned long)vmsa);
+}
+
+int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+{
+	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	struct sev_es_save_area *cur_vmsa;
+	struct desc_ptr gdtr;
+	u64 ret, retry = 5;
+	struct hv_enable_vp_vtl *start_vp_input;
+	unsigned long flags;
+
+	if (!vmsa)
+		return -ENOMEM;
+
+	native_store_gdt(&gdtr);
+
+	vmsa->gdtr.base = gdtr.address;
+	vmsa->gdtr.limit = gdtr.size;
+
+	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
+
+	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
+
+	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
+
+	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
+
+	vmsa->efer = native_read_msr(MSR_EFER);
+
+	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
+	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
+	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+
+	vmsa->xcr0 = 1;
+	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
+	vmsa->rip = (u64)secondary_startup_64_no_verify;
+	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
+
+	/*
+	 * Set the SNP-specific fields for this VMSA:
+	 *   VMPL level
+	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
+	 */
+	vmsa->vmpl = 0;
+	vmsa->sev_features = sev_status >> 2;
+
+	ret = snp_set_vmsa(vmsa, true);
+	if (!ret) {
+		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+		free_page((u64)vmsa);
+		return ret;
+	}
+
+	local_irq_save(flags);
+	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+	memset(start_vp_input, 0, sizeof(*start_vp_input));
+	start_vp_input->partition_id = -1;
+	start_vp_input->vp_index = cpu;
+	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
+	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
+	do {
+		ret = hv_do_hypercall(HVCALL_START_VP,
+				      start_vp_input, NULL);
+	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
+
+	local_irq_restore(flags);
+
+	if (!hv_result_success(ret)) {
+		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
+		snp_cleanup_vmsa(vmsa);
+		vmsa = NULL;
+	}
+
+	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
+	/* Free up any previous VMSA page */
+	if (cur_vmsa)
+		snp_cleanup_vmsa(cur_vmsa);
+
+	/* Record the current VMSA page */
+	per_cpu(hv_sev_vmsa, cpu) = vmsa;
+
+	return ret;
+}
+
 #else
 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
 static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
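[Note: the non-obvious line in the moved block is the attrib repacking in hv_populate_vmcb_seg(). Reading a u16 at offset 5 of the GDT entry picks up the access byte (byte 5) and the flags/limit-high byte (byte 6); the expression then compresses them into the SVM segment-attribute form the VMSA expects: access byte in bits 0-7, the G/D/L/AVL flags nibble in bits 8-11, with the limit-high nibble discarded. A standalone user-space sketch, using a hypothetical 64-bit kernel code descriptor and names of my own (compress_attrib is not from the patch):]

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t compress_attrib(uint16_t raw)
    {
        /* Same expression as the kernel macro hv_populate_vmcb_seg() */
        return (raw & 0xFF) | ((raw >> 4) & 0xF00);
    }

    int main(void)
    {
        /*
         * Bytes 5-6 of a 64-bit code descriptor, read as a little-endian
         * u16: access byte 0x9B (present, DPL0, code) and byte 6 = 0xAF
         * (G=1, L=1, limit-high=0xF).
         */
        uint16_t raw = 0xAF9B;

        /* Prints 0xA9B: flags nibble 0xA kept, limit nibble 0xF dropped */
        printf("0x%03X\n", (unsigned)compress_attrib(raw));
        return 0;
    }

[Note: one thing a reviewer may want to flag separately: rmpadjust() returns 0 on success, so the "if (!ret)" test after snp_set_vmsa(vmsa, true) reads as inverted. It is preserved byte-for-byte by the move, so fixing it would be out of scope for this patch.]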
@@ -282,6 +413,20 @@ static void hv_tdx_msr_read(u64 msr, u64 *val)
 	else
 		*val = args.r11;
 }
+
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+	struct tdx_hypercall_args args = { };
+
+	args.r10 = control;
+	args.rdx = param1;
+	args.r8 = param2;
+
+	(void)__tdx_hypercall_ret(&args);
+
+	return args.r11;
+}
+
 #else
 static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
 static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
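[Note: hv_tdx_hypercall() packs the Hyper-V control word and the two parameters into R10/RDX/R8 via __tdx_hypercall_ret() and returns the Hyper-V status from R11. A hedged sketch of a call site, loosely modeled on the TDX branch of hv_do_hypercall(); the variable names are assumptions, and cc_mkdec() marks the buffer addresses as shared/decrypted:]

    /* Hypothetical caller, not from this patch: */
    u64 status = hv_tdx_hypercall(control,
                                  cc_mkdec(input_pa),
                                  cc_mkdec(output_pa));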
@@ -309,9 +454,7 @@ void hv_ivm_msr_read(u64 msr, u64 *value)
 	else if (hv_isolation_type_snp())
 		hv_ghcb_msr_read(msr, value);
 }
-#endif
 
-#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
 /*
  * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
  *
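[Note: deleting this #endif/#if pair removes a redundant close-and-reopen of the same conditional, merging the MSR helpers and the visibility-management code that follows into one contiguous region guarded once by CONFIG_AMD_MEM_ENCRYPT || CONFIG_INTEL_TDX_GUEST.]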
@@ -432,141 +575,6 @@ static bool hv_is_private_mmio(u64 addr)
 	return false;
 }
 
-#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
-
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-
-#define hv_populate_vmcb_seg(seg, gdtr_base)			\
-do {								\
-	if (seg.selector) {					\
-		seg.base = 0;					\
-		seg.limit = HV_AP_SEGMENT_LIMIT;		\
-		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);	\
-		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
-	}							\
-} while (0)							\
-
-static int snp_set_vmsa(void *va, bool vmsa)
-{
-	u64 attrs;
-
-	/*
-	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
-	 * using the RMPADJUST instruction. However, for the instruction to
-	 * succeed it must target the permissions of a lesser privileged
-	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
-	 * instruction in the AMD64 APM Volume 3).
-	 */
-	attrs = 1;
-	if (vmsa)
-		attrs |= RMPADJUST_VMSA_PAGE_BIT;
-
-	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
-}
-
-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
-{
-	int err;
-
-	err = snp_set_vmsa(vmsa, false);
-	if (err)
-		pr_err("clear VMSA page failed (%u), leaking page\n", err);
-	else
-		free_page((unsigned long)vmsa);
-}
-
-int hv_snp_boot_ap(int cpu, unsigned long start_ip)
-{
-	struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
-		__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	struct sev_es_save_area *cur_vmsa;
-	struct desc_ptr gdtr;
-	u64 ret, retry = 5;
-	struct hv_enable_vp_vtl *start_vp_input;
-	unsigned long flags;
-
-	if (!vmsa)
-		return -ENOMEM;
-
-	native_store_gdt(&gdtr);
-
-	vmsa->gdtr.base = gdtr.address;
-	vmsa->gdtr.limit = gdtr.size;
-
-	asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
-	hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
-
-	asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
-	hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
-
-	asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
-	hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
-
-	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
-	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
-
-	vmsa->efer = native_read_msr(MSR_EFER);
-
-	asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
-	asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
-	asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
-
-	vmsa->xcr0 = 1;
-	vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
-	vmsa->rip = (u64)secondary_startup_64_no_verify;
-	vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
-
-	/*
-	 * Set the SNP-specific fields for this VMSA:
-	 *   VMPL level
-	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
-	 */
-	vmsa->vmpl = 0;
-	vmsa->sev_features = sev_status >> 2;
-
-	ret = snp_set_vmsa(vmsa, true);
-	if (!ret) {
-		pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
-		free_page((u64)vmsa);
-		return ret;
-	}
-
-	local_irq_save(flags);
-	start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
-	memset(start_vp_input, 0, sizeof(*start_vp_input));
-	start_vp_input->partition_id = -1;
-	start_vp_input->vp_index = cpu;
-	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
-	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
-
-	do {
-		ret = hv_do_hypercall(HVCALL_START_VP,
-				      start_vp_input, NULL);
-	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
-
-	local_irq_restore(flags);
-
-	if (!hv_result_success(ret)) {
-		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
-		snp_cleanup_vmsa(vmsa);
-		vmsa = NULL;
-	}
-
-	cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
-	/* Free up any previous VMSA page */
-	if (cur_vmsa)
-		snp_cleanup_vmsa(cur_vmsa);
-
-	/* Record the current VMSA page */
-	per_cpu(hv_sev_vmsa, cpu) = vmsa;
-
-	return ret;
-}
-
-#endif /* CONFIG_AMD_MEM_ENCRYPT */
-
-#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
-
 void __init hv_vtom_init(void)
 {
 	enum hv_isolation_type type = hv_get_isolation_type();
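[Note: this entire deleted block is the same VMSA/AP-boot code added earlier in the diff; it moves out of its own CONFIG_AMD_MEM_ENCRYPT island (and drops the redundant reopen of the shared guard) into the existing CONFIG_AMD_MEM_ENCRYPT section. A sketch of the resulting layout of the file, abridged, with the comments mine rather than the patch's:]

    #ifdef CONFIG_AMD_MEM_ENCRYPT
    /* GHCB helpers for SNP with the paravisor */
    /* VMSA/AP-boot code for SNP without the paravisor */
    #else
    /* empty stubs */
    #endif

    #ifdef CONFIG_INTEL_TDX_GUEST
    /* TDX MSR helpers and hv_tdx_hypercall() */
    #else
    /* empty stubs */
    #endif

    #if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
    /* hv_ivm_msr_read/write(), hv_mark_gpa_visibility(), hv_vtom_init(), ... */
    #endif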
@@ -654,20 +662,3 @@ bool hv_isolation_type_tdx(void)
 {
 	return static_branch_unlikely(&isolation_type_tdx);
 }
-
-#ifdef CONFIG_INTEL_TDX_GUEST
-
-u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
-{
-	struct tdx_hypercall_args args = { };
-
-	args.r10 = control;
-	args.rdx = param1;
-	args.r8 = param2;
-
-	(void)__tdx_hypercall_ret(&args);
-
-	return args.r11;
-}
-
-#endif
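[Note: likewise, this deleted hv_tdx_hypercall() is byte-for-byte the copy added into the CONFIG_INTEL_TDX_GUEST section above. All three relocations are verbatim, so the patch reads as pure code movement with no intended functional change.]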