@@ -925,17 +925,11 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  *
  * This list is modified at module load time to reflect the
  * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in the beginning of the list.
+ * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * may depend on host virtualization features rather than host cpu features.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	12
 static u32 msrs_to_save[] = {
-	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
-	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
-	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
-	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
-	MSR_KVM_PV_EOI_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -947,14 +941,23 @@ static u32 msrs_to_save[] = {
 
 static unsigned num_msrs_to_save;
 
-static const u32 emulated_msrs[] = {
+static u32 emulated_msrs[] = {
+	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+	MSR_KVM_PV_EOI_EN,
+
 	MSR_IA32_TSC_ADJUST,
 	MSR_IA32_TSCDEADLINE,
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,
 	MSR_IA32_MCG_CTL,
 };
 
+static unsigned num_emulated_msrs;
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits)
@@ -2928,7 +2931,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
 			goto out;
 		n = msr_list.nmsrs;
-		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
+		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
 			goto out;
 		r = -E2BIG;
@@ -2940,7 +2943,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			goto out;
 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
 				 &emulated_msrs,
-				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
+				 num_emulated_msrs * sizeof(u32)))
 			goto out;
 		r = 0;
 		break;
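
(Illustration, not part of the patch.) The two ioctl hunks above only change how the exported count is computed; userspace still consumes it through KVM_GET_MSR_INDEX_LIST exactly as before. A minimal sketch of the usual probe-then-fetch pattern, which relies on the kernel writing back the real nmsrs and returning E2BIG when the caller's buffer is too small (error handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct kvm_msr_list probe = { .nmsrs = 0 }, *list;

	/* First call: buffer too small, kernel fills in the real count. */
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
	list->nmsrs = probe.nmsrs;

	/* Second call: saved and emulated indices are copied out together. */
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) == 0)
		printf("%u MSR indices exported\n", list->nmsrs);

	free(list);
	return 0;
}
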
@@ -4206,8 +4209,7 @@ static void kvm_init_msr_list(void)
 	u32 dummy[2];
 	unsigned i, j;
 
-	/* skip the first msrs in the list. KVM-specific */
-	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
+	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
 			continue;
 
@@ -4232,6 +4234,18 @@ static void kvm_init_msr_list(void)
 		j++;
 	}
 	num_msrs_to_save = j;
+
+	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
+		switch (emulated_msrs[i]) {
+		default:
+			break;
+		}
+
+		if (j < i)
+			emulated_msrs[j] = emulated_msrs[i];
+		j++;
+	}
+	num_emulated_msrs = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
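
(Illustration, not part of the patch.) The loop added to kvm_init_msr_list() is an in-place filter-and-compact pass: as merged, the switch keeps every entry (it only has a default: arm), but later cases can continue past MSRs whose emulation depends on host virtualization features that are absent; surviving entries are slid toward the front and counted into num_emulated_msrs. A standalone sketch of the same idiom, with a hypothetical is_supported() predicate standing in for those future per-MSR checks:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Hypothetical check; the kernel version would consult host/VMX state. */
static bool is_supported(unsigned int msr)
{
	return msr != 0xdead;	/* pretend one index is unsupported */
}

static unsigned int msrs[] = { 0x10, 0xdead, 0x174, 0x175 };
static unsigned int num_msrs;

int main(void)
{
	unsigned int i, j;

	/* Same shape as the new kvm_init_msr_list() loop: skip entries that
	 * fail their check, slide survivors down, record the final count. */
	for (i = j = 0; i < ARRAY_SIZE(msrs); i++) {
		if (!is_supported(msrs[i]))
			continue;

		if (j < i)
			msrs[j] = msrs[i];
		j++;
	}
	num_msrs = j;

	for (i = 0; i < num_msrs; i++)
		printf("kept 0x%x\n", msrs[i]);
	return 0;
}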