Skip to content

Commit 99dae3b

Browse files
paulusmack authored and agraf committed
KVM: PPC: Load/save FP/VMX/VSX state directly to/from vcpu struct
Now that we have the vcpu floating-point and vector state stored in the same type of struct as the main kernel uses, we can load that state directly from the vcpu struct instead of having extra copies to/from the thread_struct. Similarly, when the guest state needs to be saved, we can have it saved it directly to the vcpu struct by setting the current->thread.fp_save_area and current->thread.vr_save_area pointers. That also means that we don't need to back up and restore userspace's FP/vector state. This all makes the code simpler and faster. Note that it's not necessary to save or modify current->thread.fpexc_mode, since nothing in KVM uses or is affected by its value. Nor is it necessary to touch used_vr or used_vsr. Signed-off-by: Paul Mackerras <[email protected]> Signed-off-by: Alexander Graf <[email protected]>
1 parent efff191 commit 99dae3b

File tree

3 files changed

+19
-73
lines changed

3 files changed

+19
-73
lines changed

arch/powerpc/kvm/book3s_pr.c

Lines changed: 16 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -567,16 +567,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
567567
* both the traditional FP registers and the added VSX
568568
* registers into thread.fp_state.fpr[].
569569
*/
570-
if (current->thread.regs->msr & MSR_FP)
570+
if (t->regs->msr & MSR_FP)
571571
giveup_fpu(current);
572-
vcpu->arch.fp = t->fp_state;
572+
t->fp_save_area = NULL;
573573
}
574574

575575
#ifdef CONFIG_ALTIVEC
576576
if (msr & MSR_VEC) {
577577
if (current->thread.regs->msr & MSR_VEC)
578578
giveup_altivec(current);
579-
vcpu->arch.vr = t->vr_state;
579+
t->vr_save_area = NULL;
580580
}
581581
#endif
582582

@@ -661,22 +661,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
661661
#endif
662662

663663
if (msr & MSR_FP) {
664-
t->fp_state = vcpu->arch.fp;
665-
t->fpexc_mode = 0;
666664
enable_kernel_fp();
667-
load_fp_state(&t->fp_state);
665+
load_fp_state(&vcpu->arch.fp);
666+
t->fp_save_area = &vcpu->arch.fp;
668667
}
669668

670669
if (msr & MSR_VEC) {
671670
#ifdef CONFIG_ALTIVEC
672-
t->vr_state = vcpu->arch.vr;
673-
t->vrsave = -1;
674671
enable_kernel_altivec();
675-
load_vr_state(&t->vr_state);
672+
load_vr_state(&vcpu->arch.vr);
673+
t->vr_save_area = &vcpu->arch.vr;
676674
#endif
677675
}
678676

679-
current->thread.regs->msr |= msr;
677+
t->regs->msr |= msr;
680678
vcpu->arch.guest_owned_ext |= msr;
681679
kvmppc_recalc_shadow_msr(vcpu);
682680

@@ -697,12 +695,12 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
697695

698696
if (lost_ext & MSR_FP) {
699697
enable_kernel_fp();
700-
load_fp_state(&current->thread.fp_state);
698+
load_fp_state(&vcpu->arch.fp);
701699
}
702700
#ifdef CONFIG_ALTIVEC
703701
if (lost_ext & MSR_VEC) {
704702
enable_kernel_altivec();
705-
load_vr_state(&current->thread.vr_state);
703+
load_vr_state(&vcpu->arch.vr);
706704
}
707705
#endif
708706
current->thread.regs->msr |= lost_ext;
@@ -1204,17 +1202,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
12041202
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
12051203
{
12061204
int ret;
1207-
struct thread_fp_state fp;
1208-
int fpexc_mode;
12091205
#ifdef CONFIG_ALTIVEC
1210-
struct thread_vr_state vr;
12111206
unsigned long uninitialized_var(vrsave);
1212-
int used_vr;
12131207
#endif
1214-
#ifdef CONFIG_VSX
1215-
int used_vsr;
1216-
#endif
1217-
ulong ext_msr;
12181208

12191209
/* Check if we can run the vcpu at all */
12201210
if (!vcpu->arch.sane) {
@@ -1236,33 +1226,22 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
12361226
goto out;
12371227
}
12381228

1239-
/* Save FPU state in stack */
1229+
/* Save FPU state in thread_struct */
12401230
if (current->thread.regs->msr & MSR_FP)
12411231
giveup_fpu(current);
1242-
fp = current->thread.fp_state;
1243-
fpexc_mode = current->thread.fpexc_mode;
12441232

12451233
#ifdef CONFIG_ALTIVEC
1246-
/* Save Altivec state in stack */
1247-
used_vr = current->thread.used_vr;
1248-
if (used_vr) {
1249-
if (current->thread.regs->msr & MSR_VEC)
1250-
giveup_altivec(current);
1251-
vr = current->thread.vr_state;
1252-
vrsave = current->thread.vrsave;
1253-
}
1234+
/* Save Altivec state in thread_struct */
1235+
if (current->thread.regs->msr & MSR_VEC)
1236+
giveup_altivec(current);
12541237
#endif
12551238

12561239
#ifdef CONFIG_VSX
1257-
/* Save VSX state in stack */
1258-
used_vsr = current->thread.used_vsr;
1259-
if (used_vsr && (current->thread.regs->msr & MSR_VSX))
1240+
/* Save VSX state in thread_struct */
1241+
if (current->thread.regs->msr & MSR_VSX)
12601242
__giveup_vsx(current);
12611243
#endif
12621244

1263-
/* Remember the MSR with disabled extensions */
1264-
ext_msr = current->thread.regs->msr;
1265-
12661245
/* Preload FPU if it's enabled */
12671246
if (vcpu->arch.shared->msr & MSR_FP)
12681247
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -1277,25 +1256,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
12771256
/* Make sure we save the guest FPU/Altivec/VSX state */
12781257
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
12791258

1280-
current->thread.regs->msr = ext_msr;
1281-
1282-
/* Restore FPU/VSX state from stack */
1283-
current->thread.fp_state = fp;
1284-
current->thread.fpexc_mode = fpexc_mode;
1285-
1286-
#ifdef CONFIG_ALTIVEC
1287-
/* Restore Altivec state from stack */
1288-
if (used_vr && current->thread.used_vr) {
1289-
current->thread.vr_state = vr;
1290-
current->thread.vrsave = vrsave;
1291-
}
1292-
current->thread.used_vr = used_vr;
1293-
#endif
1294-
1295-
#ifdef CONFIG_VSX
1296-
current->thread.used_vsr = used_vsr;
1297-
#endif
1298-
12991259
out:
13001260
vcpu->mode = OUTSIDE_GUEST_MODE;
13011261
return ret;

arch/powerpc/kvm/booke.c

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -682,10 +682,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
682682
{
683683
int ret, s;
684684
struct thread_struct thread;
685-
#ifdef CONFIG_PPC_FPU
686-
struct thread_fp_state fp;
687-
int fpexc_mode;
688-
#endif
689685

690686
if (!vcpu->arch.sane) {
691687
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -703,11 +699,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
703699
#ifdef CONFIG_PPC_FPU
704700
/* Save userspace FPU state in stack */
705701
enable_kernel_fp();
706-
fp = current->thread.fp_state;
707-
fpexc_mode = current->thread.fpexc_mode;
708-
709-
/* Restore guest FPU state to thread */
710-
current->thread.fp_state = vcpu->arch.fp;
711702

712703
/*
713704
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -741,13 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
741732
kvmppc_save_guest_fp(vcpu);
742733

743734
vcpu->fpu_active = 0;
744-
745-
/* Save guest FPU state from thread */
746-
vcpu->arch.fp = current->thread.fp_state;
747-
748-
/* Restore userspace FPU state from stack */
749-
current->thread.fp_state = fp;
750-
current->thread.fpexc_mode = fpexc_mode;
751735
#endif
752736

753737
out:

arch/powerpc/kvm/booke.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,8 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
137137
#ifdef CONFIG_PPC_FPU
138138
if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
139139
enable_kernel_fp();
140-
load_fp_state(&current->thread.fp_state);
140+
load_fp_state(&vcpu->arch.fp);
141+
current->thread.fp_save_area = &vcpu->arch.fp;
141142
current->thread.regs->msr |= MSR_FP;
142143
}
143144
#endif
@@ -152,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
152153
#ifdef CONFIG_PPC_FPU
153154
if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
154155
giveup_fpu(current);
156+
current->thread.fp_save_area = NULL;
155157
#endif
156158
}
157159

0 commit comments

Comments
 (0)