@@ -2574,6 +2574,9 @@ EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 {
+	if (vcpu->arch.guest_tsc_protected)
+		return;
+
 	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
 				   vcpu->arch.l1_tsc_offset,
 				   l1_offset);
@@ -2631,12 +2634,18 @@ static inline bool kvm_check_tsc_unstable(void)
  * participates in.
  */
 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
-				  u64 ns, bool matched)
+				  u64 ns, bool matched, bool user_set_tsc)
 {
 	struct kvm *kvm = vcpu->kvm;
 
 	lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
+	if (vcpu->arch.guest_tsc_protected)
+		return;
+
+	if (user_set_tsc)
+		vcpu->kvm->arch.user_set_tsc = true;
+
 	/*
 	 * We also track the most recent recorded KHZ, write and time to
 	 * allow the matching interval to be extended at each write.
@@ -2722,8 +2731,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 		}
 	}
 
-	if (user_value)
-		kvm->arch.user_set_tsc = true;
 
 	/*
 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
@@ -2743,7 +2750,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
 		matched = true;
 	}
 
-	__kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
+	__kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
 
@@ -3923,7 +3930,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC:
 		if (msr_info->host_initiated) {
 			kvm_synchronize_tsc(vcpu, &data);
-		} else {
+		} else if (!vcpu->arch.guest_tsc_protected) {
 			u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
 			adjust_tsc_offset_guest(vcpu, adj);
 			vcpu->arch.ia32_tsc_adjust_msr += adj;
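Note on the two branches above: a host-initiated write funnels into kvm_synchronize_tsc(), while a guest WRMSR takes the second branch, which is now skipped entirely when the guest's TSC is protected. For illustration only, a minimal userspace sketch (not part of this patch; helper name is hypothetical) of the host-initiated path, driven through KVM_SET_MSRS on a vCPU fd:

    /* Hypothetical VMM helper: host-initiated MSR_IA32_TSC write.
     * Reaches kvm_set_msr_common() with msr_info->host_initiated == true,
     * i.e. the kvm_synchronize_tsc() branch above.
     */
    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    #define MSR_IA32_TSC 0x10

    static int vmm_set_guest_tsc(int vcpu_fd, __u64 value)
    {
            struct {
                    struct kvm_msrs hdr;
                    struct kvm_msr_entry entry;
            } msrs;

            memset(&msrs, 0, sizeof(msrs));
            msrs.hdr.nmsrs = 1;
            msrs.entry.index = MSR_IA32_TSC;
            msrs.entry.data = value;

            /* KVM_SET_MSRS returns the number of MSRs processed (1 here). */
            return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
    }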
@@ -4590,6 +4597,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
 	return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
 }
 
+static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+{
+	return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
+}
+
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 	int r = 0;
@@ -4698,7 +4710,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 #endif
 	case KVM_CAP_SYNC_REGS:
-		r = KVM_SYNC_X86_VALID_FIELDS;
+		r = kvm_sync_valid_fields(kvm);
 		break;
 	case KVM_CAP_ADJUST_CLOCK:
 		r = KVM_CLOCK_VALID_FLAGS;
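Because kvm_sync_valid_fields() makes the KVM_CAP_SYNC_REGS value VM-dependent (0 once kvm->arch.has_protected_state is set), userspace should probe the capability on the VM fd rather than on /dev/kvm. A minimal sketch of such a probe; the helper name is illustrative, not part of this patch:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Returns the set of KVM_SYNC_X86_* fields this VM supports.
     * 0 means the run->s.regs area cannot be used for this VM, so the
     * VMM must leave kvm_valid_regs/kvm_dirty_regs clear.
     */
    static unsigned int probe_sync_regs(int vm_fd)
    {
            int r = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS);

            return r < 0 ? 0 : (unsigned int)r;
    }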
@@ -5003,7 +5015,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		u64 offset = kvm_compute_l1_tsc_offset(vcpu,
 				vcpu->arch.last_guest_tsc);
 		kvm_vcpu_write_tsc_offset(vcpu, offset);
-		vcpu->arch.tsc_catchup = 1;
+		if (!vcpu->arch.guest_tsc_protected)
+			vcpu->arch.tsc_catchup = 1;
 	}
 
 	if (kvm_lapic_hv_timer_in_use(vcpu))
@@ -5742,8 +5755,7 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
 		tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
 		ns = get_kvmclock_base_ns();
 
-		kvm->arch.user_set_tsc = true;
-		__kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
+		__kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
 		raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
 		r = 0;
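For context, kvm_arch_tsc_set_attr() is the handler behind the vCPU KVM_SET_DEVICE_ATTR ioctl with group KVM_VCPU_TSC_CTRL and attribute KVM_VCPU_TSC_OFFSET; with the change above, such a write is marked user-set inside __kvm_synchronize_tsc() and, for a protected TSC, ignored there while the ioctl still returns 0. A hedged userspace sketch of that caller (helper name is hypothetical):

    #include <linux/kvm.h>   /* also pulls in asm/kvm.h for KVM_VCPU_TSC_* */
    #include <sys/ioctl.h>

    static int vmm_set_l1_tsc_offset(int vcpu_fd, __u64 offset)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_VCPU_TSC_CTRL,
                    .attr  = KVM_VCPU_TSC_OFFSET,
                    .addr  = (__u64)(unsigned long)&offset,
            };

            /* Availability is advertised via KVM_CAP_VCPU_ATTRIBUTES. */
            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }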
@@ -11480,6 +11492,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
 	struct kvm_run *kvm_run = vcpu->run;
+	u32 sync_valid_fields;
 	int r;
 
 	r = kvm_mmu_post_init_vm(vcpu->kvm);
@@ -11525,8 +11538,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
-	    (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
+	sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
+	if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
+	    (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
 		r = -EINVAL;
 		goto out;
 	}
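Userspace that uses the sync_regs area should now mask the bits it puts in kvm_run->kvm_valid_regs / kvm_dirty_regs with the per-VM value probed via KVM_CAP_SYNC_REGS; unsupported bits fail the check above with -EINVAL. A minimal sketch (illustrative helper, assuming kvm_run is the mmap()ed vCPU run structure and sync_valid_fields came from the probe shown earlier):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int run_vcpu_once(int vcpu_fd, struct kvm_run *kvm_run,
                             unsigned int sync_valid_fields)
    {
            /* Ask KVM to mirror GPRs into kvm_run->s.regs.regs on exit,
             * but only if this VM actually supports it.
             */
            kvm_run->kvm_valid_regs = KVM_SYNC_X86_REGS & sync_valid_fields;
            kvm_run->kvm_dirty_regs = 0;

            return ioctl(vcpu_fd, KVM_RUN, 0);
    }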
@@ -11584,7 +11598,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 out:
 	kvm_put_guest_fpu(vcpu);
-	if (kvm_run->kvm_valid_regs)
+	if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
 		store_regs(vcpu);
 	post_kvm_run_save(vcpu);
 	kvm_vcpu_srcu_read_unlock(vcpu);
@@ -12874,7 +12888,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		mutex_unlock(&kvm->slots_lock);
 	}
 	kvm_destroy_vcpus(kvm);
-	kvm_x86_call(vm_destroy)(kvm);
 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
 	kvm_pic_destroy(kvm);
 	kvm_ioapic_destroy(kvm);
@@ -12884,6 +12897,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_page_track_cleanup(kvm);
 	kvm_xen_destroy_vm(kvm);
 	kvm_hv_destroy_vm(kvm);
+	kvm_x86_call(vm_destroy)(kvm);
 }
 
 static void memslot_rmap_free(struct kvm_memory_slot *slot)