@@ -4153,6 +4153,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	struct p9_host_os_sprs host_os_sprs;
 	s64 dec;
 	u64 tb, next_timer;
+	unsigned long msr;
 	int trap;
 
 	WARN_ON_ONCE(vcpu->arch.ceded);
@@ -4164,8 +4165,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (next_timer < time_limit)
 		time_limit = next_timer;
 
+	vcpu->arch.ceded = 0;
+
 	save_p9_host_os_sprs(&host_os_sprs);
 
+	/* MSR bits may have been cleared by context switch */
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
 	kvmppc_subcore_enter_guest();
 
 	vc->entry_exit_map = 1;
@@ -4174,12 +4190,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	vcpu_vpa_increment_dispatch(vcpu);
 
 	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+		msr = mfmsr(); /* TM restore can update msr */
+	}
 
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
 
-	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
 	load_vr_state(&vcpu->arch.vr);
@@ -4288,7 +4305,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
 	store_vr_state(&vcpu->arch.vr);
@@ -4851,32 +4867,31 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	unsigned long user_tar = 0;
 	unsigned int user_vrsave;
 	struct kvm *kvm;
+	unsigned long msr;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/* No need to go into the guest when all we'll do is come back out */
+	if (signal_pending(current)) {
+		run->exit_reason = KVM_EXIT_INTR;
+		return -EINTR;
+	}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	/*
 	 * Don't allow entry with a suspended transaction, because
 	 * the guest entry/exit code will lose it.
-	 * If the guest has TM enabled, save away their TM-related SPRs
-	 * (they will get restored by the TM unavailable interrupt).
 	 */
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
 	    (current->thread.regs->msr & MSR_TM)) {
 		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
 			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 			run->fail_entry.hardware_entry_failure_reason = 0;
 			return -EINVAL;
 		}
-		/* Enable TM so we can read the TM SPRs */
-		mtmsr(mfmsr() | MSR_TM);
-		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
-		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
-		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
-		current->thread.regs->msr &= ~MSR_TM;
 	}
 #endif
 
@@ -4891,18 +4906,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 
 	kvmppc_core_prepare_to_enter(vcpu);
 
-	/* No need to go into the guest when all we'll do is come back out */
-	if (signal_pending(current)) {
-		run->exit_reason = KVM_EXIT_INTR;
-		return -EINTR;
-	}
-
 	kvm = vcpu->kvm;
 	atomic_inc(&kvm->arch.vcpus_running);
 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	flush_all_to_thread(current);
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
+	kvmppc_save_user_regs();
 
 	/* Save userspace EBB and other register values */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
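
Both kvmhv_p9_guest_entry() and kvmppc_vcpu_run_hv() now build the same facility mask (FP/VEC/VSX/TM, gated on config and CPU features) before calling msr_check_and_set(). A minimal sketch of how that repeated mask computation could be factored into one place is shown below; the helper name kvmhv_facility_msr_mask() is hypothetical and not part of this patch, which open-codes the mask at both call sites.

/*
 * Sketch only: collect the MSR facility bits this patch enables via
 * msr_check_and_set() before touching FP/VMX/VSX (and possibly TM) state.
 * All macros and feature checks used here appear in the patch itself.
 */
static unsigned long kvmhv_facility_msr_mask(void)
{
	unsigned long msr = 0;

	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		msr |= MSR_TM;

	return msr;
}

/* Usage at either call site (sketch): msr = msr_check_and_set(kvmhv_facility_msr_mask()); */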