@@ -295,8 +295,6 @@ u64 __read_mostly host_xcr0;
 u64 __read_mostly supported_xcr0;
 EXPORT_SYMBOL_GPL(supported_xcr0);
 
-static struct kmem_cache *x86_fpu_cache;
-
 static struct kmem_cache *x86_emulator_cache;
 
 /*
@@ -4705,23 +4703,24 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
-	if (!vcpu->arch.guest_fpu)
+	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
 		return;
 
-	fpu_copy_fpstate_to_kvm_uabi(vcpu->arch.guest_fpu, guest_xsave->region,
-				     sizeof(guest_xsave->region),
-				     vcpu->arch.pkru);
+	fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+				       guest_xsave->region,
+				       sizeof(guest_xsave->region),
+				       vcpu->arch.pkru);
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
-	if (!vcpu->arch.guest_fpu)
+	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
 		return 0;
 
-	return fpu_copy_kvm_uabi_to_fpstate(vcpu->arch.guest_fpu,
-					    guest_xsave->region,
-					    supported_xcr0, &vcpu->arch.pkru);
+	return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
+					      guest_xsave->region,
+					      supported_xcr0, &vcpu->arch.pkru);
 }
 
 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
@@ -8301,18 +8300,11 @@ int kvm_arch_init(void *opaque)
 	}
 
 	r = -ENOMEM;
-	x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu),
-					  __alignof__(struct fpu), SLAB_ACCOUNT,
-					  NULL);
-	if (!x86_fpu_cache) {
-		printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n");
-		goto out;
-	}
 
 	x86_emulator_cache = kvm_alloc_emulator_cache();
 	if (!x86_emulator_cache) {
 		pr_err("kvm: failed to allocate cache for x86 emulator\n");
-		goto out_free_x86_fpu_cache;
+		goto out;
 	}
 
 	user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
@@ -8350,8 +8342,6 @@ int kvm_arch_init(void *opaque)
 	free_percpu(user_return_msrs);
 out_free_x86_emulator_cache:
 	kmem_cache_destroy(x86_emulator_cache);
-out_free_x86_fpu_cache:
-	kmem_cache_destroy(x86_fpu_cache);
 out:
 	return r;
 }
@@ -8378,7 +8368,6 @@ void kvm_arch_exit(void)
 	kvm_mmu_module_exit();
 	free_percpu(user_return_msrs);
 	kmem_cache_destroy(x86_emulator_cache);
-	kmem_cache_destroy(x86_fpu_cache);
 #ifdef CONFIG_KVM_XEN
 	static_key_deferred_flush(&kvm_xen_enabled);
 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
@@ -9801,23 +9790,17 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	/*
-	 * Guests with protected state have guest_fpu == NULL which makes
-	 * the swap only save the host state. Exclude PKRU from restore as
-	 * it is restored separately in kvm_x86_ops.run().
+	 * Exclude PKRU from restore as restored separately in
+	 * kvm_x86_ops.run().
 	 */
-	fpu_swap_kvm_fpu(vcpu->arch.user_fpu, vcpu->arch.guest_fpu,
-			 ~XFEATURE_MASK_PKRU);
+	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
 	trace_kvm_fpu(1);
 }
 
 /* When vcpu_run ends, restore user space FPU context. */
 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Guests with protected state have guest_fpu == NULL which makes
-	 * swap only restore the host state.
-	 */
-	fpu_swap_kvm_fpu(vcpu->arch.guest_fpu, vcpu->arch.user_fpu, ~0ULL);
+	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
 	++vcpu->stat.fpu_reload;
 	trace_kvm_fpu(0);
 }
@@ -10398,12 +10381,12 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	struct fxregs_state *fxsave;
 
-	if (!vcpu->arch.guest_fpu)
+	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
 		return 0;
 
 	vcpu_load(vcpu);
 
-	fxsave = &vcpu->arch.guest_fpu->fpstate->regs.fxsave;
+	fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
 	memcpy(fpu->fpr, fxsave->st_space, 128);
 	fpu->fcw = fxsave->cwd;
 	fpu->fsw = fxsave->swd;
@@ -10421,12 +10404,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	struct fxregs_state *fxsave;
 
-	if (!vcpu->arch.guest_fpu)
+	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
 		return 0;
 
 	vcpu_load(vcpu);
 
-	fxsave = &vcpu->arch.guest_fpu->fpstate->regs.fxsave;
+	fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
 
 	memcpy(fxsave->st_space, fpu->fpr, 128);
 	fxsave->cwd = fpu->fcw;
@@ -10487,15 +10470,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
-void kvm_free_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.guest_fpu) {
-		kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
-		vcpu->arch.guest_fpu = NULL;
-	}
-}
-EXPORT_SYMBOL_GPL(kvm_free_guest_fpu);
-
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
 	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
@@ -10552,22 +10526,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	if (!alloc_emulate_ctxt(vcpu))
 		goto free_wbinvd_dirty_mask;
 
-	vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
-						GFP_KERNEL_ACCOUNT);
-	if (!vcpu->arch.user_fpu) {
-		pr_err("kvm: failed to allocate userspace's fpu\n");
-		goto free_emulate_ctxt;
-	}
-
-	vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
-						 GFP_KERNEL_ACCOUNT);
-	if (!vcpu->arch.guest_fpu) {
+	if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
 		pr_err("kvm: failed to allocate vcpu's fpu\n");
-		goto free_user_fpu;
+		goto free_emulate_ctxt;
 	}
 
-	fpu_init_fpstate_user(vcpu->arch.user_fpu);
-	fpu_init_fpstate_user(vcpu->arch.guest_fpu);
 	fx_init(vcpu);
 
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
@@ -10600,9 +10563,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	return 0;
 
 free_guest_fpu:
-	kvm_free_guest_fpu(vcpu);
-free_user_fpu:
-	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
+	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
 free_emulate_ctxt:
 	kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
 free_wbinvd_dirty_mask:
@@ -10651,8 +10612,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 	kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
-	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-	kvm_free_guest_fpu(vcpu);
+	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
 
 	kvm_hv_vcpu_uninit(vcpu);
 	kvm_pmu_destroy(vcpu);
@@ -10704,8 +10664,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	kvm_async_pf_hash_reset(vcpu);
 	vcpu->arch.apf.halted = false;
 
-	if (vcpu->arch.guest_fpu && kvm_mpx_supported()) {
-		struct fpstate *fpstate = vcpu->arch.guest_fpu->fpstate;
+	if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
+		struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
 
 		/*
 		 * To avoid have the INIT path from kvm_apic_has_events() that be
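
Taken together, the hunks above drop the two kmem_cache-backed struct fpu pointers (user_fpu and guest_fpu) in favor of a single guest FPU state object embedded in vcpu->arch.guest_fpu, which the core FPU code allocates, swaps, and frees. The following is only a condensed sketch of the resulting vCPU FPU lifecycle, assembled from the calls visible in this diff; the wrapper names (vcpu_fpu_setup, vcpu_fpu_run_window, vcpu_fpu_teardown) are illustrative, the header path is assumed, and the code is not buildable outside a kernel tree.

/* Sketch only: condensed from the patch above, not part of it. */
#include <linux/errno.h>
#include <asm/fpu/api.h>	/* assumed location of the guest fpstate API */

static int vcpu_fpu_setup(struct kvm_vcpu *vcpu)
{
	/* One allocation replaces the old user_fpu/guest_fpu pair. */
	if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu))
		return -ENOMEM;
	return 0;
}

static void vcpu_fpu_run_window(struct kvm_vcpu *vcpu)
{
	/*
	 * Entering the guest: swap the guest fpstate in. PKRU is excluded
	 * here because kvm_x86_ops.run() restores it separately.
	 */
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);

	/* ... vcpu_run() ... */

	/* Leaving the guest: swap the userspace (QEMU) fpstate back in. */
	fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
}

static void vcpu_fpu_teardown(struct kvm_vcpu *vcpu)
{
	/* Replaces kvm_free_guest_fpu() and the x86_fpu_cache frees. */
	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
}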