Skip to content

Commit 34e119c

Browse files
npiggin authored and mpe committed
KVM: PPC: Book3S HV P9: Reduce mtmsrd instructions required to save host SPRs
This reduces the number of mtmsrd required to enable facility bits when saving/restoring registers, by having the KVM code set all bits up front rather than using individual facility functions that set their particular MSR bits. Signed-off-by: Nicholas Piggin <[email protected]> Reviewed-by: Fabiano Rosas <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 174a3ab commit 34e119c

File tree

4 files changed

+71
-19
lines changed

4 files changed

+71
-19
lines changed

arch/powerpc/include/asm/switch_to.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,8 @@ static inline void clear_task_ebb(struct task_struct *t)
112112
#endif
113113
}
114114

115+
void kvmppc_save_user_regs(void);
116+
115117
extern int set_thread_tidr(struct task_struct *t);
116118

117119
#endif /* _ASM_POWERPC_SWITCH_TO_H */

arch/powerpc/kernel/process.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1156,6 +1156,34 @@ static inline void save_sprs(struct thread_struct *t)
11561156
#endif
11571157
}
11581158

1159+
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1160+
void kvmppc_save_user_regs(void)
1161+
{
1162+
unsigned long usermsr;
1163+
1164+
if (!current->thread.regs)
1165+
return;
1166+
1167+
usermsr = current->thread.regs->msr;
1168+
1169+
if (usermsr & MSR_FP)
1170+
save_fpu(current);
1171+
1172+
if (usermsr & MSR_VEC)
1173+
save_altivec(current);
1174+
1175+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1176+
if (usermsr & MSR_TM) {
1177+
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
1178+
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
1179+
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
1180+
current->thread.regs->msr &= ~MSR_TM;
1181+
}
1182+
#endif
1183+
}
1184+
EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
1185+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1186+
11591187
static inline void restore_sprs(struct thread_struct *old_thread,
11601188
struct thread_struct *new_thread)
11611189
{

arch/powerpc/kvm/book3s_hv.c

Lines changed: 40 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -4153,6 +4153,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
41534153
struct p9_host_os_sprs host_os_sprs;
41544154
s64 dec;
41554155
u64 tb, next_timer;
4156+
unsigned long msr;
41564157
int trap;
41574158

41584159
WARN_ON_ONCE(vcpu->arch.ceded);
@@ -4164,8 +4165,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
41644165
if (next_timer < time_limit)
41654166
time_limit = next_timer;
41664167

4168+
vcpu->arch.ceded = 0;
4169+
41674170
save_p9_host_os_sprs(&host_os_sprs);
41684171

4172+
/* MSR bits may have been cleared by context switch */
4173+
msr = 0;
4174+
if (IS_ENABLED(CONFIG_PPC_FPU))
4175+
msr |= MSR_FP;
4176+
if (cpu_has_feature(CPU_FTR_ALTIVEC))
4177+
msr |= MSR_VEC;
4178+
if (cpu_has_feature(CPU_FTR_VSX))
4179+
msr |= MSR_VSX;
4180+
if (cpu_has_feature(CPU_FTR_TM) ||
4181+
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
4182+
msr |= MSR_TM;
4183+
msr = msr_check_and_set(msr);
4184+
41694185
kvmppc_subcore_enter_guest();
41704186

41714187
vc->entry_exit_map = 1;
@@ -4174,12 +4190,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
41744190
vcpu_vpa_increment_dispatch(vcpu);
41754191

41764192
if (cpu_has_feature(CPU_FTR_TM) ||
4177-
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
4193+
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
41784194
kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
4195+
msr = mfmsr(); /* TM restore can update msr */
4196+
}
41794197

41804198
switch_pmu_to_guest(vcpu, &host_os_sprs);
41814199

4182-
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
41834200
load_fp_state(&vcpu->arch.fp);
41844201
#ifdef CONFIG_ALTIVEC
41854202
load_vr_state(&vcpu->arch.vr);
@@ -4288,7 +4305,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
42884305

42894306
restore_p9_host_os_sprs(vcpu, &host_os_sprs);
42904307

4291-
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
42924308
store_fp_state(&vcpu->arch.fp);
42934309
#ifdef CONFIG_ALTIVEC
42944310
store_vr_state(&vcpu->arch.vr);
@@ -4851,32 +4867,31 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
48514867
unsigned long user_tar = 0;
48524868
unsigned int user_vrsave;
48534869
struct kvm *kvm;
4870+
unsigned long msr;
48544871

48554872
if (!vcpu->arch.sane) {
48564873
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
48574874
return -EINVAL;
48584875
}
48594876

4877+
/* No need to go into the guest when all we'll do is come back out */
4878+
if (signal_pending(current)) {
4879+
run->exit_reason = KVM_EXIT_INTR;
4880+
return -EINTR;
4881+
}
4882+
4883+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
48604884
/*
48614885
* Don't allow entry with a suspended transaction, because
48624886
* the guest entry/exit code will lose it.
4863-
* If the guest has TM enabled, save away their TM-related SPRs
4864-
* (they will get restored by the TM unavailable interrupt).
48654887
*/
4866-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
48674888
if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
48684889
(current->thread.regs->msr & MSR_TM)) {
48694890
if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
48704891
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
48714892
run->fail_entry.hardware_entry_failure_reason = 0;
48724893
return -EINVAL;
48734894
}
4874-
/* Enable TM so we can read the TM SPRs */
4875-
mtmsr(mfmsr() | MSR_TM);
4876-
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
4877-
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
4878-
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
4879-
current->thread.regs->msr &= ~MSR_TM;
48804895
}
48814896
#endif
48824897

@@ -4891,18 +4906,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
48914906

48924907
kvmppc_core_prepare_to_enter(vcpu);
48934908

4894-
/* No need to go into the guest when all we'll do is come back out */
4895-
if (signal_pending(current)) {
4896-
run->exit_reason = KVM_EXIT_INTR;
4897-
return -EINTR;
4898-
}
4899-
49004909
kvm = vcpu->kvm;
49014910
atomic_inc(&kvm->arch.vcpus_running);
49024911
/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
49034912
smp_mb();
49044913

4905-
flush_all_to_thread(current);
4914+
msr = 0;
4915+
if (IS_ENABLED(CONFIG_PPC_FPU))
4916+
msr |= MSR_FP;
4917+
if (cpu_has_feature(CPU_FTR_ALTIVEC))
4918+
msr |= MSR_VEC;
4919+
if (cpu_has_feature(CPU_FTR_VSX))
4920+
msr |= MSR_VSX;
4921+
if (cpu_has_feature(CPU_FTR_TM) ||
4922+
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
4923+
msr |= MSR_TM;
4924+
msr = msr_check_and_set(msr);
4925+
4926+
kvmppc_save_user_regs();
49064927

49074928
/* Save userspace EBB and other register values */
49084929
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {

arch/powerpc/kvm/book3s_hv_p9_entry.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
224224
vc->tb_offset_applied = vc->tb_offset;
225225
}
226226

227+
/* Could avoid mfmsr by passing around, but probably no big deal */
227228
msr = mfmsr();
228229

229230
host_hfscr = mfspr(SPRN_HFSCR);

0 commit comments

Comments
 (0)