@@ -567,16 +567,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 		 * both the traditional FP registers and the added VSX
 		 * registers into thread.fp_state.fpr[].
 		 */
-		if (current->thread.regs->msr & MSR_FP)
+		if (t->regs->msr & MSR_FP)
 			giveup_fpu(current);
-		vcpu->arch.fp = t->fp_state;
+		t->fp_save_area = NULL;
 	}
 
 #ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		vcpu->arch.vr = t->vr_state;
+		t->vr_save_area = NULL;
 	}
 #endif
 
@@ -661,22 +661,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif
 
 	if (msr & MSR_FP) {
-		t->fp_state = vcpu->arch.fp;
-		t->fpexc_mode = 0;
 		enable_kernel_fp();
-		load_fp_state(&t->fp_state);
+		load_fp_state(&vcpu->arch.fp);
+		t->fp_save_area = &vcpu->arch.fp;
 	}
 
 	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-		t->vr_state = vcpu->arch.vr;
-		t->vrsave = -1;
 		enable_kernel_altivec();
-		load_vr_state(&t->vr_state);
+		load_vr_state(&vcpu->arch.vr);
+		t->vr_save_area = &vcpu->arch.vr;
 #endif
 	}
 
-	current->thread.regs->msr |= msr;
+	t->regs->msr |= msr;
 	vcpu->arch.guest_owned_ext |= msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
@@ -697,12 +695,12 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 
 	if (lost_ext & MSR_FP) {
 		enable_kernel_fp();
-		load_fp_state(&current->thread.fp_state);
+		load_fp_state(&vcpu->arch.fp);
 	}
 #ifdef CONFIG_ALTIVEC
 	if (lost_ext & MSR_VEC) {
 		enable_kernel_altivec();
-		load_vr_state(&current->thread.vr_state);
+		load_vr_state(&vcpu->arch.vr);
 	}
 #endif
 	current->thread.regs->msr |= lost_ext;
@@ -1204,17 +1202,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_fp_state fp;
-	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	struct thread_vr_state vr;
 	unsigned long uninitialized_var(vrsave);
-	int used_vr;
 #endif
-#ifdef CONFIG_VSX
-	int used_vsr;
-#endif
-	ulong ext_msr;
 
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
@@ -1236,33 +1226,22 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	/* Save FPU state in stack */
+	/* Save FPU state in thread_struct */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	fp = current->thread.fp_state;
-	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
-	/* Save Altivec state in stack */
-	used_vr = current->thread.used_vr;
-	if (used_vr) {
-		if (current->thread.regs->msr & MSR_VEC)
-			giveup_altivec(current);
-		vr = current->thread.vr_state;
-		vrsave = current->thread.vrsave;
-	}
+	/* Save Altivec state in thread_struct */
+	if (current->thread.regs->msr & MSR_VEC)
+		giveup_altivec(current);
 #endif
 
 #ifdef CONFIG_VSX
-	/* Save VSX state in stack */
-	used_vsr = current->thread.used_vsr;
-	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+	/* Save VSX state in thread_struct */
+	if (current->thread.regs->msr & MSR_VSX)
 		__giveup_vsx(current);
 #endif
 
-	/* Remember the MSR with disabled extensions */
-	ext_msr = current->thread.regs->msr;
-
 	/* Preload FPU if it's enabled */
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -1277,25 +1256,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Make sure we save the guest FPU/Altivec/VSX state */
 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 
-	current->thread.regs->msr = ext_msr;
-
-	/* Restore FPU/VSX state from stack */
-	current->thread.fp_state = fp;
-	current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
-	/* Restore Altivec state from stack */
-	if (used_vr && current->thread.used_vr) {
-		current->thread.vr_state = vr;
-		current->thread.vrsave = vrsave;
-	}
-	current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
-	current->thread.used_vsr = used_vsr;
-#endif
-
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;